+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 #include <linux/kernel.h>
 #include <linux/mm.h>
..
 #include <linux/sched.h>
 #include <linux/vmalloc.h>

-#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
 #include <asm/set_memory.h>
 #include <asm/tlbflush.h>

..
 	pgprot_t clear_mask;
 };

-static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
-			void *data)
+bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);
+
+static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
 {
 	struct page_change_data *cdata = data;
 	pte_t pte = READ_ONCE(*ptep);
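
A note on the hunk above: apply_to_page_range() no longer passes a pgtable_t token to its per-PTE callback, hence the slimmer signature. The rest of change_page_range() is elided by the `..` gap that follows; for orientation, in mainline it follows the usual read-modify-write pattern, roughly:

    static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
    {
            struct page_change_data *cdata = data;
            pte_t pte = READ_ONCE(*ptep);

            /* Drop the bits in clear_mask, then apply the bits in set_mask. */
            pte = clear_pte_bit(pte, cdata->clear_mask);
            pte = set_pte_bit(pte, cdata->set_mask);

            set_pte(ptep, pte);
            return 0;
    }
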
..
 			    pgprot_t set_mask, pgprot_t clear_mask)
 {
 	unsigned long start = addr;
-	unsigned long size = PAGE_SIZE*numpages;
+	unsigned long size = PAGE_SIZE * numpages;
 	unsigned long end = start + size;
 	struct vm_struct *area;
+	int i;

 	if (!PAGE_ALIGNED(addr)) {
 		start &= PAGE_MASK;
 	if (!numpages)
 		return 0;

+	/*
+	 * If we are manipulating read-only permissions, apply the same
+	 * change to the linear mapping of the pages that back this VM area.
+	 */
+	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
+			    pgprot_val(clear_mask) == PTE_RDONLY)) {
+		for (i = 0; i < area->nr_pages; i++) {
+			__change_memory_common((u64)page_address(area->pages[i]),
+					       PAGE_SIZE, set_mask, clear_mask);
+		}
+	}
+
+	/*
+	 * Get rid of potentially aliasing lazily unmapped vm areas that may
+	 * have permissions set that deviate from the ones we are setting here.
+	 */
+	vm_unmap_aliases();
+
 	return __change_memory_common(start, size, set_mask, clear_mask);
 }

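
Why the linear-map loop in the hunk above matters: pages in a vmalloc area are mapped twice, once through the vmalloc address range being changed here and once through the kernel linear map. With rodata_full enabled, a read-only change applied only to the vmalloc alias would leave the data writable through the linear mapping. A minimal caller-side sketch (standard set_memory API; error handling trimmed):

    #include <linux/vmalloc.h>
    #include <linux/set_memory.h>

    /* Sketch: write-protect one vmalloc'd page. With rodata_full, the
     * linear-map alias of the backing page is write-protected too. */
    static void *alloc_ro_page(void)
    {
            void *buf = vmalloc(PAGE_SIZE);

            if (buf && set_memory_ro((unsigned long)buf, 1))
                    pr_warn("failed to write-protect buffer\n");
            return buf;
    }
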
..
 {
 	return change_memory_common(addr, numpages,
 					__pgprot(PTE_PXN),
-					__pgprot(0));
+					__pgprot(PTE_MAYBE_GP));
 }
-EXPORT_SYMBOL_GPL(set_memory_nx);

 int set_memory_x(unsigned long addr, int numpages)
 {
 	return change_memory_common(addr, numpages,
-					__pgprot(0),
+					__pgprot(PTE_MAYBE_GP),
 					__pgprot(PTE_PXN));
 }
-EXPORT_SYMBOL_GPL(set_memory_x);

 int set_memory_valid(unsigned long addr, int numpages, int enable)
 {
..
 			__pgprot(PTE_VALID));
 }

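
In the hunk above, PTE_MAYBE_GP couples executable-permission changes to the BTI guarded-page attribute: set_memory_x() marks pages guarded where the hardware supports Branch Target Identification, and set_memory_nx() clears the attribute again. The EXPORT_SYMBOL_GPL() removals also make both helpers kernel-internal. A sketch of the intent only, not a verbatim copy of the header (the real definition lives in arch/arm64/include/asm/pgtable-prot.h and is config-dependent):

    #ifdef CONFIG_ARM64_BTI_KERNEL
    #define PTE_MAYBE_GP	(system_supports_bti() ? PTE_GP : 0)
    #else
    #define PTE_MAYBE_GP	0
    #endif
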
-#ifdef CONFIG_DEBUG_PAGEALLOC
+int set_direct_map_invalid_noflush(struct page *page)
+{
+	struct page_change_data data = {
+		.set_mask = __pgprot(0),
+		.clear_mask = __pgprot(PTE_VALID),
+	};
+
+	if (!rodata_full)
+		return 0;
+
+	return apply_to_page_range(&init_mm,
+				   (unsigned long)page_address(page),
+				   PAGE_SIZE, change_page_range, &data);
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+	struct page_change_data data = {
+		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
+		.clear_mask = __pgprot(PTE_RDONLY),
+	};
+
+	if (!rodata_full)
+		return 0;
+
+	return apply_to_page_range(&init_mm,
+				   (unsigned long)page_address(page),
+				   PAGE_SIZE, change_page_range, &data);
+}
+
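
Both new helpers are deliberately *_noflush: they rewrite the linear-map PTE through apply_to_page_range() but leave TLB invalidation to the caller, which may want to batch it across many pages. Expected usage runs along these lines (a sketch assuming kernel context and a single page; the helper name is hypothetical):

    /* Sketch: temporarily hide one page from the linear map, then
     * restore it. The caller owns the TLB maintenance. */
    static void toggle_direct_map(struct page *page)
    {
            unsigned long addr = (unsigned long)page_address(page);

            set_direct_map_invalid_noflush(page);
            flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
            /* ... the page is now inaccessible via the linear map ... */
            set_direct_map_default_noflush(page);
            flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
    }
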
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
+	if (!debug_pagealloc_enabled() && !rodata_full)
+		return;
+
 	set_memory_valid((unsigned long)page_address(page), numpages, enable);
 }
-#ifdef CONFIG_HIBERNATION
+
 /*
- * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
- * is used to determine if a linear map page has been marked as not-valid by
- * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
- * This is based on kern_addr_valid(), which almost does what we need.
+ * This function is used to determine if a linear map page has been marked as
+ * not-valid. Walk the page table and check the PTE_VALID bit. This is based
+ * on kern_addr_valid(), which almost does what we need.
  *
  * Because this is only called on the kernel linear map, p?d_sect() implies
  * p?d_present(). When debug_pagealloc is enabled, sections mappings are
..
 bool kernel_page_present(struct page *page)
 {
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp, pud;
 	pmd_t *pmdp, pmd;
 	pte_t *ptep;
 	unsigned long addr = (unsigned long)page_address(page);

+	if (!debug_pagealloc_enabled() && !rodata_full)
+		return true;
+
 	pgdp = pgd_offset_k(addr);
 	if (pgd_none(READ_ONCE(*pgdp)))
 		return false;

-	pudp = pud_offset(pgdp, addr);
+	p4dp = p4d_offset(pgdp, addr);
+	if (p4d_none(READ_ONCE(*p4dp)))
+		return false;
+
+	pudp = pud_offset(p4dp, addr);
 	pud = READ_ONCE(*pudp);
 	if (pud_none(pud))
 		return false;
..
 	ptep = pte_offset_kernel(pmdp, addr);
 	return pte_valid(READ_ONCE(*ptep));
 }
-#endif /* CONFIG_HIBERNATION */
-#endif /* CONFIG_DEBUG_PAGEALLOC */
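
With the CONFIG_DEBUG_PAGEALLOC/CONFIG_HIBERNATION #ifdef pair removed, kernel_page_present() is always built; the new early return keeps it cheap when neither debug_pagealloc nor rodata_full can have invalidated anything. The added p4d step follows the generic five-level page-table API, where p4d_offset() folds away on four-level configurations. A hibernation-style caller sketch (simplified; the helper name is hypothetical):

    /* Sketch: copy a linear-map page even if it is currently invalid,
     * by mapping it around the access with __kernel_map_pages(). */
    static void copy_possibly_unmapped_page(void *dst, struct page *page)
    {
            if (kernel_page_present(page)) {
                    copy_page(dst, page_address(page));
                    return;
            }
            __kernel_map_pages(page, 1, 1);         /* make valid */
            copy_page(dst, page_address(page));
            __kernel_map_pages(page, 1, 0);         /* invalidate again */
    }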