2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/arm64/mm/pageattr.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 #include <linux/kernel.h>
 #include <linux/mm.h>
@@ -16,7 +8,7 @@
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
 
-#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
 #include <asm/set_memory.h>
 #include <asm/tlbflush.h>
 
@@ -25,8 +17,9 @@
         pgprot_t clear_mask;
 };
 
-static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
-                        void *data)
+bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);
+
+static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
 {
         struct page_change_data *cdata = data;
         pte_t pte = READ_ONCE(*ptep);
@@ -61,9 +54,10 @@
                                 pgprot_t set_mask, pgprot_t clear_mask)
 {
         unsigned long start = addr;
-        unsigned long size = PAGE_SIZE*numpages;
+        unsigned long size = PAGE_SIZE * numpages;
         unsigned long end = start + size;
         struct vm_struct *area;
+        int i;
 
         if (!PAGE_ALIGNED(addr)) {
                 start &= PAGE_MASK;
@@ -93,6 +87,24 @@
         if (!numpages)
                 return 0;
 
+        /*
+         * If we are manipulating read-only permissions, apply the same
+         * change to the linear mapping of the pages that back this VM area.
+         */
+        if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
+                            pgprot_val(clear_mask) == PTE_RDONLY)) {
+                for (i = 0; i < area->nr_pages; i++) {
+                        __change_memory_common((u64)page_address(area->pages[i]),
+                                               PAGE_SIZE, set_mask, clear_mask);
+                }
+        }
+
+        /*
+         * Get rid of potentially aliasing lazily unmapped vm areas that may
+         * have permissions set that deviate from the ones we are setting here.
+         */
+        vm_unmap_aliases();
+
         return __change_memory_common(start, size, set_mask, clear_mask);
 }
 
@@ -114,17 +126,15 @@
 {
         return change_memory_common(addr, numpages,
                                         __pgprot(PTE_PXN),
-                                        __pgprot(0));
+                                        __pgprot(PTE_MAYBE_GP));
 }
-EXPORT_SYMBOL_GPL(set_memory_nx);
 
 int set_memory_x(unsigned long addr, int numpages)
 {
         return change_memory_common(addr, numpages,
-                                        __pgprot(0),
+                                        __pgprot(PTE_MAYBE_GP),
                                         __pgprot(PTE_PXN));
 }
-EXPORT_SYMBOL_GPL(set_memory_x);
 
 int set_memory_valid(unsigned long addr, int numpages, int enable)
 {
@@ -138,17 +148,48 @@
                                         __pgprot(PTE_VALID));
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+int set_direct_map_invalid_noflush(struct page *page)
+{
+        struct page_change_data data = {
+                .set_mask = __pgprot(0),
+                .clear_mask = __pgprot(PTE_VALID),
+        };
+
+        if (!rodata_full)
+                return 0;
+
+        return apply_to_page_range(&init_mm,
+                                   (unsigned long)page_address(page),
+                                   PAGE_SIZE, change_page_range, &data);
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+        struct page_change_data data = {
+                .set_mask = __pgprot(PTE_VALID | PTE_WRITE),
+                .clear_mask = __pgprot(PTE_RDONLY),
+        };
+
+        if (!rodata_full)
+                return 0;
+
+        return apply_to_page_range(&init_mm,
+                                   (unsigned long)page_address(page),
+                                   PAGE_SIZE, change_page_range, &data);
+}
+
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
+        if (!debug_pagealloc_enabled() && !rodata_full)
+                return;
+
         set_memory_valid((unsigned long)page_address(page), numpages, enable);
 }
-#ifdef CONFIG_HIBERNATION
+
 /*
- * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
- * is used to determine if a linear map page has been marked as not-valid by
- * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
- * This is based on kern_addr_valid(), which almost does what we need.
+ * This function is used to determine if a linear map page has been marked as
+ * not-valid. Walk the page table and check the PTE_VALID bit. This is based
+ * on kern_addr_valid(), which almost does what we need.
  *
  * Because this is only called on the kernel linear map, p?d_sect() implies
  * p?d_present(). When debug_pagealloc is enabled, sections mappings are
@@ -157,16 +198,24 @@
 bool kernel_page_present(struct page *page)
 {
         pgd_t *pgdp;
+        p4d_t *p4dp;
         pud_t *pudp, pud;
         pmd_t *pmdp, pmd;
         pte_t *ptep;
         unsigned long addr = (unsigned long)page_address(page);
 
+        if (!debug_pagealloc_enabled() && !rodata_full)
+                return true;
+
         pgdp = pgd_offset_k(addr);
         if (pgd_none(READ_ONCE(*pgdp)))
                 return false;
 
-        pudp = pud_offset(pgdp, addr);
+        p4dp = p4d_offset(pgdp, addr);
+        if (p4d_none(READ_ONCE(*p4dp)))
+                return false;
+
+        pudp = pud_offset(p4dp, addr);
         pud = READ_ONCE(*pudp);
         if (pud_none(pud))
                 return false;
@@ -183,5 +232,3 @@
         ptep = pte_offset_kernel(pmdp, addr);
         return pte_valid(READ_ONCE(*ptep));
 }
-#endif /* CONFIG_HIBERNATION */
-#endif /* CONFIG_DEBUG_PAGEALLOC */
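
A minimal usage sketch (not part of the patch above): after this change, a read-only
permission update on a vmalloc'd region also propagates to the linear-map alias of the
backing pages when rodata_full is set, because change_memory_common() now walks
area->nr_pages, applies the same masks via __change_memory_common() on each page's
linear address, and flushes lazily unmapped aliases with vm_unmap_aliases(). In the
snippet, the helper names example_protect()/example_release() and the buffer variable
are hypothetical; set_memory_ro(), set_memory_rw(), vmalloc() and vfree() are existing
kernel APIs, and error handling is simplified for illustration.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/set_memory.h>
#include <linux/vmalloc.h>

static void *example_buf;

static int example_protect(void)
{
        example_buf = vmalloc(PAGE_SIZE);
        if (!example_buf)
                return -ENOMEM;

        /*
         * Read-protect the page via its vmalloc address; with rodata_full,
         * the linear-map alias of the backing page becomes read-only too.
         */
        return set_memory_ro((unsigned long)example_buf, 1);
}

static void example_release(void)
{
        /*
         * Restore write permission on the vmalloc mapping (and, with
         * rodata_full, on its linear alias) before freeing the page.
         */
        set_memory_rw((unsigned long)example_buf, 1);
        vfree(example_buf);
        example_buf = NULL;
}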