.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * linux/arch/arm/mm/mmu.c |
---|
3 | 4 | * |
---|
4 | 5 | * Copyright (C) 1995-2005 Russell King |
---|
5 | | - * |
---|
6 | | - * This program is free software; you can redistribute it and/or modify |
---|
7 | | - * it under the terms of the GNU General Public License version 2 as |
---|
8 | | - * published by the Free Software Foundation. |
---|
9 | 6 | */ |
---|
10 | 7 | #include <linux/module.h> |
---|
11 | 8 | #include <linux/kernel.h> |
---|
.. | .. |
---|
20 | 17 | |
---|
21 | 18 | #include <asm/cp15.h> |
---|
22 | 19 | #include <asm/cputype.h> |
---|
23 | | -#include <asm/sections.h> |
---|
24 | 20 | #include <asm/cachetype.h> |
---|
25 | 21 | #include <asm/fixmap.h> |
---|
26 | 22 | #include <asm/sections.h> |
---|
.. | .. |
---|
32 | 28 | #include <asm/traps.h> |
---|
33 | 29 | #include <asm/procinfo.h> |
---|
34 | 30 | #include <asm/memory.h> |
---|
| 31 | +#include <asm/pgalloc.h> |
---|
35 | 32 | |
---|
36 | 33 | #include <asm/mach/arch.h> |
---|
37 | 34 | #include <asm/mach/map.h> |
---|
.. | .. |
---|
41 | 38 | #include "fault.h" |
---|
42 | 39 | #include "mm.h" |
---|
43 | 40 | #include "tcm.h" |
---|
| 41 | + |
---|
| 42 | +extern unsigned long __atags_pointer; |
---|
44 | 43 | |
---|
45 | 44 | /* |
---|
46 | 45 | * empty_zero_page is a special page that is used for |
---|
.. | .. |
---|
66 | 65 | static unsigned int ecc_mask __initdata = 0; |
---|
67 | 66 | pgprot_t pgprot_user; |
---|
68 | 67 | pgprot_t pgprot_kernel; |
---|
69 | | -pgprot_t pgprot_hyp_device; |
---|
70 | | -pgprot_t pgprot_s2; |
---|
71 | | -pgprot_t pgprot_s2_device; |
---|
72 | 68 | |
---|
73 | 69 | EXPORT_SYMBOL(pgprot_user); |
---|
74 | 70 | EXPORT_SYMBOL(pgprot_kernel); |
---|
.. | .. |
---|
78 | 74 | unsigned int cr_mask; |
---|
79 | 75 | pmdval_t pmd; |
---|
80 | 76 | pteval_t pte; |
---|
81 | | - pteval_t pte_s2; |
---|
82 | 77 | }; |
---|
83 | | - |
---|
84 | | -#ifdef CONFIG_ARM_LPAE |
---|
85 | | -#define s2_policy(policy) policy |
---|
86 | | -#else |
---|
87 | | -#define s2_policy(policy) 0 |
---|
88 | | -#endif |
---|
89 | | - |
---|
90 | | -unsigned long kimage_voffset __ro_after_init; |
---|
91 | 78 | |
---|
92 | 79 | static struct cachepolicy cache_policies[] __initdata = { |
---|
93 | 80 | { |
---|
.. | .. |
---|
95 | 82 | .cr_mask = CR_W|CR_C, |
---|
96 | 83 | .pmd = PMD_SECT_UNCACHED, |
---|
97 | 84 | .pte = L_PTE_MT_UNCACHED, |
---|
98 | | - .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED), |
---|
99 | 85 | }, { |
---|
100 | 86 | .policy = "buffered", |
---|
101 | 87 | .cr_mask = CR_C, |
---|
102 | 88 | .pmd = PMD_SECT_BUFFERED, |
---|
103 | 89 | .pte = L_PTE_MT_BUFFERABLE, |
---|
104 | | - .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED), |
---|
105 | 90 | }, { |
---|
106 | 91 | .policy = "writethrough", |
---|
107 | 92 | .cr_mask = 0, |
---|
108 | 93 | .pmd = PMD_SECT_WT, |
---|
109 | 94 | .pte = L_PTE_MT_WRITETHROUGH, |
---|
110 | | - .pte_s2 = s2_policy(L_PTE_S2_MT_WRITETHROUGH), |
---|
111 | 95 | }, { |
---|
112 | 96 | .policy = "writeback", |
---|
113 | 97 | .cr_mask = 0, |
---|
114 | 98 | .pmd = PMD_SECT_WB, |
---|
115 | 99 | .pte = L_PTE_MT_WRITEBACK, |
---|
116 | | - .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK), |
---|
117 | 100 | }, { |
---|
118 | 101 | .policy = "writealloc", |
---|
119 | 102 | .cr_mask = 0, |
---|
120 | 103 | .pmd = PMD_SECT_WBWA, |
---|
121 | 104 | .pte = L_PTE_MT_WRITEALLOC, |
---|
122 | | - .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK), |
---|
123 | 105 | } |
---|
124 | 106 | }; |
---|
125 | 107 | |
---|
.. | .. |
---|
/*
 * "cachepolicy=" early parameter handler for builds without cp15:
 * the cache policy cannot be changed when there is no coprocessor 15,
 * so only warn.  Returns 0, as the early_param() callback convention
 * requires on (non-)consumption.
 */
static int __init early_cachepolicy(char *p)
{
	pr_warn("cachepolicy kernel parameter not supported without cp15\n");
	return 0;
}
early_param("cachepolicy", early_cachepolicy);
---|
235 | 218 | |
---|
/*
 * "noalign" boot option handler for builds without cp15: the alignment
 * control bit lives in the cp15 control register, so it cannot be
 * cleared here; only warn.  Returns 1 so __setup() treats the option
 * as handled and does not pass it on to init's environment.
 */
static int __init noalign_setup(char *__unused)
{
	pr_warn("noalign kernel parameter not supported without cp15\n");
	return 1;
}
__setup("noalign", noalign_setup);
---|
241 | 225 | |
---|
.. | .. |
---|
249 | 233 | [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ |
---|
250 | 234 | .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | |
---|
251 | 235 | L_PTE_SHARED, |
---|
252 | | - .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) | |
---|
253 | | - s2_policy(L_PTE_S2_MT_DEV_SHARED) | |
---|
254 | | - L_PTE_SHARED, |
---|
255 | 236 | .prot_l1 = PMD_TYPE_TABLE, |
---|
256 | 237 | .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S, |
---|
257 | 238 | .domain = DOMAIN_IO, |
---|
.. | .. |
---|
262 | 243 | .prot_sect = PROT_SECT_DEVICE, |
---|
263 | 244 | .domain = DOMAIN_IO, |
---|
264 | 245 | }, |
---|
265 | | - [MT_DEVICE_CACHED] = { /* ioremap_cached */ |
---|
| 246 | + [MT_DEVICE_CACHED] = { /* ioremap_cache */ |
---|
266 | 247 | .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED, |
---|
267 | 248 | .prot_l1 = PMD_TYPE_TABLE, |
---|
268 | 249 | .prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB, |
---|
.. | .. |
---|
313 | 294 | L_PTE_XN, |
---|
314 | 295 | .prot_l1 = PMD_TYPE_TABLE, |
---|
315 | 296 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, |
---|
| 297 | + .domain = DOMAIN_KERNEL, |
---|
| 298 | + }, |
---|
| 299 | + [MT_MEMORY_RO] = { |
---|
| 300 | + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | |
---|
| 301 | + L_PTE_XN | L_PTE_RDONLY, |
---|
| 302 | + .prot_l1 = PMD_TYPE_TABLE, |
---|
| 303 | +#ifdef CONFIG_ARM_LPAE |
---|
| 304 | + .prot_sect = PMD_TYPE_SECT | L_PMD_SECT_RDONLY | PMD_SECT_AP2, |
---|
| 305 | +#else |
---|
| 306 | + .prot_sect = PMD_TYPE_SECT, |
---|
| 307 | +#endif |
---|
316 | 308 | .domain = DOMAIN_KERNEL, |
---|
317 | 309 | }, |
---|
318 | 310 | [MT_ROM] = { |
---|
.. | .. |
---|
377 | 369 | |
---|
/*
 * Return the kernel page table's PMD entry covering @addr.
 * pmd_off_k() performs the full pgd/p4d/pud/pmd walk on the kernel
 * (init_mm) tables; used for the fixmap region, whose table levels
 * are populated at early boot.
 */
static inline pmd_t * __init fixmap_pmd(unsigned long addr)
{
	return pmd_off_k(addr);
}
---|
386 | 374 | |
---|
387 | 375 | void __init early_fixmap_init(void) |
---|
.. | .. |
---|
437 | 425 | struct cachepolicy *cp; |
---|
438 | 426 | unsigned int cr = get_cr(); |
---|
439 | 427 | pteval_t user_pgprot, kern_pgprot, vecs_pgprot; |
---|
440 | | - pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot; |
---|
441 | 428 | int cpu_arch = cpu_architecture(); |
---|
442 | 429 | int i; |
---|
443 | 430 | |
---|
.. | .. |
---|
514 | 501 | |
---|
515 | 502 | /* Also setup NX memory mapping */ |
---|
516 | 503 | mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN; |
---|
| 504 | + mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_XN; |
---|
517 | 505 | } |
---|
518 | 506 | if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { |
---|
519 | 507 | /* |
---|
.. | .. |
---|
561 | 549 | */ |
---|
562 | 550 | cp = &cache_policies[cachepolicy]; |
---|
563 | 551 | vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; |
---|
564 | | - s2_pgprot = cp->pte_s2; |
---|
565 | | - hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte; |
---|
566 | | - s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2; |
---|
567 | 552 | |
---|
568 | 553 | #ifndef CONFIG_ARM_LPAE |
---|
569 | 554 | /* |
---|
.. | .. |
---|
596 | 581 | mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; |
---|
597 | 582 | mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; |
---|
598 | 583 | mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; |
---|
| 584 | + mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; |
---|
599 | 585 | #endif |
---|
600 | 586 | |
---|
601 | 587 | /* |
---|
.. | .. |
---|
607 | 593 | user_pgprot |= L_PTE_SHARED; |
---|
608 | 594 | kern_pgprot |= L_PTE_SHARED; |
---|
609 | 595 | vecs_pgprot |= L_PTE_SHARED; |
---|
610 | | - s2_pgprot |= L_PTE_SHARED; |
---|
611 | 596 | mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S; |
---|
612 | 597 | mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; |
---|
613 | 598 | mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; |
---|
.. | .. |
---|
616 | 601 | mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; |
---|
617 | 602 | mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S; |
---|
618 | 603 | mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED; |
---|
| 604 | + mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_S; |
---|
| 605 | + mem_types[MT_MEMORY_RO].prot_pte |= L_PTE_SHARED; |
---|
619 | 606 | mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED; |
---|
620 | 607 | mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S; |
---|
621 | 608 | mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED; |
---|
.. | .. |
---|
669 | 656 | pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); |
---|
670 | 657 | pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | |
---|
671 | 658 | L_PTE_DIRTY | kern_pgprot); |
---|
672 | | - pgprot_s2 = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot); |
---|
673 | | - pgprot_s2_device = __pgprot(s2_device_pgprot); |
---|
674 | | - pgprot_hyp_device = __pgprot(hyp_device_pgprot); |
---|
675 | 659 | |
---|
676 | 660 | mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; |
---|
677 | 661 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; |
---|
.. | .. |
---|
679 | 663 | mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot; |
---|
680 | 664 | mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; |
---|
681 | 665 | mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot; |
---|
| 666 | + mem_types[MT_MEMORY_RO].prot_sect |= ecc_mask | cp->pmd; |
---|
| 667 | + mem_types[MT_MEMORY_RO].prot_pte |= kern_pgprot; |
---|
682 | 668 | mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot; |
---|
683 | 669 | mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask; |
---|
684 | 670 | mem_types[MT_ROM].prot_sect |= cp->pmd; |
---|
.. | .. |
---|
719 | 705 | |
---|
720 | 706 | #define vectors_base() (vectors_high() ? 0xffff0000 : 0) |
---|
721 | 707 | |
---|
722 | | -static void __init *early_alloc_aligned(unsigned long sz, unsigned long align) |
---|
723 | | -{ |
---|
724 | | - void *ptr = __va(memblock_alloc(sz, align)); |
---|
725 | | - memset(ptr, 0, sz); |
---|
726 | | - return ptr; |
---|
727 | | -} |
---|
728 | | - |
---|
/*
 * Boot-time allocator for page table memory: grab @sz bytes from
 * memblock, aligned to @sz (page table levels must be naturally
 * aligned).  memblock_alloc() returns zeroed memory.  An allocation
 * failure this early is unrecoverable, so panic instead of returning
 * NULL to callers that cannot handle it.
 */
static void __init *early_alloc(unsigned long sz)
{
	void *ptr = memblock_alloc(sz, sz);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, sz, sz);

	return ptr;
}
---|
733 | 718 | |
---|
/*
 * Page-table allocator for use once the buddy allocator is up:
 * allocate pages with GFP_PGTABLE_KERNEL and run the PTE page
 * constructor so the page is properly initialised for later use
 * (and teardown) via the normal pgtable helpers.  Failure to
 * allocate or construct here is a fatal bug, hence BUG().
 */
static void *__init late_alloc(unsigned long sz)
{
	void *ptr = (void *)__get_free_pages(GFP_PGTABLE_KERNEL, get_order(sz));

	if (!ptr || !pgtable_pte_page_ctor(virt_to_page(ptr)))
		BUG();
	return ptr;
}
---|
.. | .. |
---|
831 | 816 | } while (pmd++, addr = next, addr != end); |
---|
832 | 817 | } |
---|
833 | 818 | |
---|
834 | | -static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, |
---|
| 819 | +static void __init alloc_init_pud(p4d_t *p4d, unsigned long addr, |
---|
835 | 820 | unsigned long end, phys_addr_t phys, |
---|
836 | 821 | const struct mem_type *type, |
---|
837 | 822 | void *(*alloc)(unsigned long sz), bool ng) |
---|
838 | 823 | { |
---|
839 | | - pud_t *pud = pud_offset(pgd, addr); |
---|
| 824 | + pud_t *pud = pud_offset(p4d, addr); |
---|
840 | 825 | unsigned long next; |
---|
841 | 826 | |
---|
842 | 827 | do { |
---|
.. | .. |
---|
844 | 829 | alloc_init_pmd(pud, addr, next, phys, type, alloc, ng); |
---|
845 | 830 | phys += next - addr; |
---|
846 | 831 | } while (pud++, addr = next, addr != end); |
---|
| 832 | +} |
---|
| 833 | + |
---|
/*
 * Populate mappings at the P4D level for [addr, end).  On 32-bit ARM
 * the p4d level is folded into the pgd, so this is effectively a
 * pass-through: iterate each p4d-sized sub-range and hand it down to
 * alloc_init_pud(), advancing the physical address in step with the
 * virtual range.
 */
static void __init alloc_init_p4d(pgd_t *pgd, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  const struct mem_type *type,
				  void *(*alloc)(unsigned long sz), bool ng)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);
		alloc_init_pud(p4d, addr, next, phys, type, alloc, ng);
		phys += next - addr;
	} while (p4d++, addr = next, addr != end);
}
---|
848 | 848 | |
---|
849 | 849 | #ifndef CONFIG_ARM_LPAE |
---|
.. | .. |
---|
893 | 893 | pgd = pgd_offset(mm, addr); |
---|
894 | 894 | end = addr + length; |
---|
895 | 895 | do { |
---|
896 | | - pud_t *pud = pud_offset(pgd, addr); |
---|
| 896 | + p4d_t *p4d = p4d_offset(pgd, addr); |
---|
| 897 | + pud_t *pud = pud_offset(p4d, addr); |
---|
897 | 898 | pmd_t *pmd = pmd_offset(pud, addr); |
---|
898 | 899 | int i; |
---|
899 | 900 | |
---|
.. | .. |
---|
944 | 945 | do { |
---|
945 | 946 | unsigned long next = pgd_addr_end(addr, end); |
---|
946 | 947 | |
---|
947 | | - alloc_init_pud(pgd, addr, next, phys, type, alloc, ng); |
---|
| 948 | + alloc_init_p4d(pgd, addr, next, phys, type, alloc, ng); |
---|
948 | 949 | |
---|
949 | 950 | phys += next - addr; |
---|
950 | 951 | addr = next; |
---|
.. | .. |
---|
966 | 967 | return; |
---|
967 | 968 | } |
---|
968 | 969 | |
---|
969 | | - if ((md->type == MT_DEVICE || md->type == MT_ROM) && |
---|
| 970 | + if (md->type == MT_DEVICE && |
---|
970 | 971 | md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START && |
---|
971 | 972 | (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { |
---|
972 | 973 | pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n", |
---|
.. | .. |
---|
980 | 981 | bool ng) |
---|
981 | 982 | { |
---|
982 | 983 | #ifdef CONFIG_ARM_LPAE |
---|
983 | | - pud_t *pud = pud_alloc(mm, pgd_offset(mm, md->virtual), md->virtual); |
---|
| 984 | + p4d_t *p4d; |
---|
| 985 | + pud_t *pud; |
---|
| 986 | + |
---|
| 987 | + p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual); |
---|
| 988 | + if (WARN_ON(!p4d)) |
---|
| 989 | + return; |
---|
| 990 | + pud = pud_alloc(mm, p4d, md->virtual); |
---|
984 | 991 | if (WARN_ON(!pud)) |
---|
985 | 992 | return; |
---|
986 | 993 | pmd_alloc(mm, pud, 0); |
---|
.. | .. |
---|
1000 | 1007 | if (!nr) |
---|
1001 | 1008 | return; |
---|
1002 | 1009 | |
---|
1003 | | - svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm)); |
---|
| 1010 | + svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm)); |
---|
| 1011 | + if (!svm) |
---|
| 1012 | + panic("%s: Failed to allocate %zu bytes align=0x%zx\n", |
---|
| 1013 | + __func__, sizeof(*svm) * nr, __alignof__(*svm)); |
---|
1004 | 1014 | |
---|
1005 | 1015 | for (md = io_desc; nr; md++, nr--) { |
---|
1006 | 1016 | create_mapping(md); |
---|
.. | .. |
---|
1022 | 1032 | struct vm_struct *vm; |
---|
1023 | 1033 | struct static_vm *svm; |
---|
1024 | 1034 | |
---|
1025 | | - svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm)); |
---|
| 1035 | + svm = memblock_alloc(sizeof(*svm), __alignof__(*svm)); |
---|
| 1036 | + if (!svm) |
---|
| 1037 | + panic("%s: Failed to allocate %zu bytes align=0x%zx\n", |
---|
| 1038 | + __func__, sizeof(*svm), __alignof__(*svm)); |
---|
1026 | 1039 | |
---|
1027 | 1040 | vm = &svm->vm; |
---|
1028 | 1041 | vm->addr = (void *)addr; |
---|
.. | .. |
---|
1161 | 1174 | |
---|
1162 | 1175 | void __init adjust_lowmem_bounds(void) |
---|
1163 | 1176 | { |
---|
1164 | | - phys_addr_t memblock_limit = 0; |
---|
1165 | | - u64 vmalloc_limit; |
---|
1166 | | - struct memblock_region *reg; |
---|
| 1177 | + phys_addr_t block_start, block_end, memblock_limit = 0; |
---|
| 1178 | + u64 vmalloc_limit, i; |
---|
1167 | 1179 | phys_addr_t lowmem_limit = 0; |
---|
1168 | 1180 | |
---|
1169 | 1181 | /* |
---|
.. | .. |
---|
1179 | 1191 | * The first usable region must be PMD aligned. Mark its start |
---|
1180 | 1192 | * as MEMBLOCK_NOMAP if it isn't |
---|
1181 | 1193 | */ |
---|
1182 | | - for_each_memblock(memory, reg) { |
---|
1183 | | - if (!memblock_is_nomap(reg)) { |
---|
1184 | | - if (!IS_ALIGNED(reg->base, PMD_SIZE)) { |
---|
1185 | | - phys_addr_t len; |
---|
| 1194 | + for_each_mem_range(i, &block_start, &block_end) { |
---|
| 1195 | + if (!IS_ALIGNED(block_start, PMD_SIZE)) { |
---|
| 1196 | + phys_addr_t len; |
---|
1186 | 1197 | |
---|
1187 | | - len = round_up(reg->base, PMD_SIZE) - reg->base; |
---|
1188 | | - memblock_mark_nomap(reg->base, len); |
---|
1189 | | - } |
---|
1190 | | - break; |
---|
| 1198 | + len = round_up(block_start, PMD_SIZE) - block_start; |
---|
| 1199 | + memblock_mark_nomap(block_start, len); |
---|
1191 | 1200 | } |
---|
| 1201 | + break; |
---|
1192 | 1202 | } |
---|
1193 | 1203 | |
---|
1194 | | - for_each_memblock(memory, reg) { |
---|
1195 | | - phys_addr_t block_start = reg->base; |
---|
1196 | | - phys_addr_t block_end = reg->base + reg->size; |
---|
1197 | | - |
---|
1198 | | - if (memblock_is_nomap(reg)) |
---|
1199 | | - continue; |
---|
1200 | | - |
---|
1201 | | - if (reg->base < vmalloc_limit) { |
---|
| 1204 | + for_each_mem_range(i, &block_start, &block_end) { |
---|
| 1205 | + if (block_start < vmalloc_limit) { |
---|
1202 | 1206 | if (block_end > lowmem_limit) |
---|
1203 | 1207 | /* |
---|
1204 | 1208 | * Compare as u64 to ensure vmalloc_limit does |
---|
.. | .. |
---|
1350 | 1354 | for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE) |
---|
1351 | 1355 | pmd_clear(pmd_off_k(addr)); |
---|
1352 | 1356 | |
---|
| 1357 | + if (__atags_pointer) { |
---|
| 1358 | + /* create a read-only mapping of the device tree */ |
---|
| 1359 | + map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK); |
---|
| 1360 | + map.virtual = FDT_FIXED_BASE; |
---|
| 1361 | + map.length = FDT_FIXED_SIZE; |
---|
| 1362 | + map.type = MT_MEMORY_RO; |
---|
| 1363 | + create_mapping(&map); |
---|
| 1364 | + } |
---|
| 1365 | + |
---|
1353 | 1366 | /* |
---|
1354 | 1367 | * Map the kernel if it is XIP. |
---|
1355 | 1368 | * It is always first in the modulearea. |
---|
.. | .. |
---|
1447 | 1460 | |
---|
1448 | 1461 | static void __init map_lowmem(void) |
---|
1449 | 1462 | { |
---|
1450 | | - struct memblock_region *reg; |
---|
1451 | 1463 | phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE); |
---|
1452 | 1464 | phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); |
---|
| 1465 | + phys_addr_t start, end; |
---|
| 1466 | + u64 i; |
---|
1453 | 1467 | |
---|
1454 | 1468 | /* Map all the lowmem memory banks. */ |
---|
1455 | | - for_each_memblock(memory, reg) { |
---|
1456 | | - phys_addr_t start = reg->base; |
---|
1457 | | - phys_addr_t end = start + reg->size; |
---|
| 1469 | + for_each_mem_range(i, &start, &end) { |
---|
1458 | 1470 | struct map_desc map; |
---|
1459 | | - |
---|
1460 | | - if (memblock_is_nomap(reg)) |
---|
1461 | | - continue; |
---|
1462 | 1471 | |
---|
1463 | 1472 | if (end > arm_lowmem_limit) |
---|
1464 | 1473 | end = arm_lowmem_limit; |
---|
.. | .. |
---|
1510 | 1519 | } |
---|
1511 | 1520 | |
---|
1512 | 1521 | #ifdef CONFIG_ARM_PV_FIXUP |
---|
1513 | | -extern unsigned long __atags_pointer; |
---|
1514 | | -typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata); |
---|
| 1522 | +typedef void pgtables_remap(long long offset, unsigned long pgd); |
---|
1515 | 1523 | pgtables_remap lpae_pgtables_remap_asm; |
---|
1516 | 1524 | |
---|
1517 | 1525 | /* |
---|
.. | .. |
---|
1524 | 1532 | unsigned long pa_pgd; |
---|
1525 | 1533 | unsigned int cr, ttbcr; |
---|
1526 | 1534 | long long offset; |
---|
1527 | | - void *boot_data; |
---|
1528 | 1535 | |
---|
1529 | 1536 | if (!mdesc->pv_fixup) |
---|
1530 | 1537 | return; |
---|
.. | .. |
---|
1541 | 1548 | */ |
---|
1542 | 1549 | lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm); |
---|
1543 | 1550 | pa_pgd = __pa(swapper_pg_dir); |
---|
1544 | | - boot_data = __va(__atags_pointer); |
---|
1545 | 1551 | barrier(); |
---|
1546 | 1552 | |
---|
1547 | 1553 | pr_info("Switching physical address space to 0x%08llx\n", |
---|
.. | .. |
---|
1577 | 1583 | * needs to be assembly. It's fairly simple, as we're using the |
---|
1578 | 1584 | * temporary tables setup by the initial assembly code. |
---|
1579 | 1585 | */ |
---|
1580 | | - lpae_pgtables_remap(offset, pa_pgd, boot_data); |
---|
| 1586 | + lpae_pgtables_remap(offset, pa_pgd); |
---|
1581 | 1587 | |
---|
1582 | 1588 | /* Re-enable the caches and cacheable TLB walks */ |
---|
1583 | 1589 | asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr)); |
---|
.. | .. |
---|
1660 | 1666 | |
---|
1661 | 1667 | empty_zero_page = virt_to_page(zero_page); |
---|
1662 | 1668 | __flush_dcache_page(NULL, empty_zero_page); |
---|
1663 | | - |
---|
1664 | | - /* Compute the virt/idmap offset, mostly for the sake of KVM */ |
---|
1665 | | - kimage_voffset = (unsigned long)&kimage_voffset - virt_to_idmap(&kimage_voffset); |
---|
1666 | 1669 | } |
---|
1667 | 1670 | |
---|
1668 | 1671 | void __init early_mm_init(const struct machine_desc *mdesc) |
---|
.. | .. |
---|
1670 | 1673 | build_mem_type_table(); |
---|
1671 | 1674 | early_paging_init(mdesc); |
---|
1672 | 1675 | } |
---|
| 1676 | + |
---|
/*
 * Arch hook for installing a PTE into a page table.
 *
 * For valid user-space mappings (addr below TASK_SIZE) this:
 *  - syncs the I/D caches for the page unless the PTE is "special"
 *    (special mappings have no struct page to flush), and
 *  - sets the not-global bit (PTE_EXT_NG) so the TLB entry is tagged
 *    to the current address space rather than shared globally.
 * The hardware PTE is then written via set_pte_ext() with any extra
 * low-level bits passed in @ext.
 */
void set_pte_at(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pteval)
{
	unsigned long ext = 0;

	if (addr < TASK_SIZE && pte_valid_user(pteval)) {
		if (!pte_special(pteval))
			__sync_icache_dcache(pteval);
		ext |= PTE_EXT_NG;
	}

	set_pte_ext(ptep, pteval, ext);
}
---|