2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/arch/x86/include/asm/pgtable_64.h
@@ -53,6 +53,12 @@
 
 struct mm_struct;
 
+#define mm_p4d_folded mm_p4d_folded
+static inline bool mm_p4d_folded(struct mm_struct *mm)
+{
+	return !pgtable_l5_enabled();
+}
+
 void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
 
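
As an illustration of the folding rule the helper above encodes: when boot-time 5-level paging is disabled, the p4d level is folded into the pgd, so a process page-table walk effectively sees four levels rather than five. A runtime helper like this, rather than the compile-time __PAGETABLE_P4D_FOLDED test, is what lets a single kernel image support both paging modes. Below is a minimal standalone C model of that rule (not kernel code; pgtable_l5_enabled_model and mm_p4d_folded_model are names invented for this sketch):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's boot-time 5-level paging flag. */
static bool pgtable_l5_enabled_model = false;

/* Mirrors the header's logic: the p4d level exists as a separate
 * table only when 5-level paging is enabled. */
static bool mm_p4d_folded_model(void)
{
	return !pgtable_l5_enabled_model;
}

int main(void)
{
	printf("p4d folded: %d -> %d paging levels\n",
	       mm_p4d_folded_model(),
	       mm_p4d_folded_model() ? 4 : 5);
	return 0;
}
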
@@ -162,34 +168,25 @@
 		native_set_pgd(pgd, native_make_pgd(0));
 }
 
-extern void sync_global_pgds(unsigned long start, unsigned long end);
-
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
 
-/*
- * Level 4 access.
- */
-#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
+/* PGD - Level 4 access */
 
-/* PUD - Level3 access */
+/* PUD - Level 3 access */
 
-/* PMD  - Level 2 access */
+/* PMD - Level 2 access */
 
-/* PTE - Level 1 access. */
-
-/* x86-64 always has all page tables mapped. */
-#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_unmap(pte) ((void)(pte))/* NOP */
+/* PTE - Level 1 access */
 
 /*
  * Encode and de-code a swap entry
  *
  * |     ...      | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
  * |     ...      |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
- * | TYPE (59-63) | ~OFFSET (9-58)  |0|0|X|X| X| X|X|SD|0| <- swp entry
+ * | TYPE (59-63) | ~OFFSET (9-58)  |0|0|X|X| X| X|F|SD|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes. We need to start storing swap entries above
@@ -197,8 +194,14 @@
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 *
+ * Bits 1-4 are not used in non-present format and available for
+ * special use described below:
+ *
 * SD (1) in swp entry is used to store soft dirty bit, which helps us
 * remember soft dirty over page migration
+ *
+ * F (2) in swp entry is used to record when a pagetable is
+ * writeprotected by userfaultfd WP support.
 *
 * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
 * but also L and G.
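
To make the bit diagram concrete, here is a standalone sketch of the documented encoding, assuming only what the comment states: TYPE in bits 59-63 and the offset stored inverted (~OFFSET) in bits 9-58. All names are invented for the sketch, and the real kernel macros additionally manage the SD/F bits described above:

#include <stdint.h>
#include <stdio.h>

#define SWP_TYPE_SHIFT		59	/* TYPE: bits 59-63 */
#define SWP_OFFSET_SHIFT	9	/* ~OFFSET: bits 9-58 */
#define SWP_OFFSET_MASK		((1ULL << 50) - 1)	/* 50 offset bits */

static uint64_t swp_encode(unsigned int type, uint64_t offset)
{
	/* The offset is stored inverted, per the "~OFFSET" column. */
	return ((uint64_t)type << SWP_TYPE_SHIFT) |
	       ((~offset & SWP_OFFSET_MASK) << SWP_OFFSET_SHIFT);
}

static unsigned int swp_type(uint64_t entry)
{
	return entry >> SWP_TYPE_SHIFT;
}

static uint64_t swp_offset(uint64_t entry)
{
	return ~(entry >> SWP_OFFSET_SHIFT) & SWP_OFFSET_MASK;
}

int main(void)
{
	uint64_t entry = swp_encode(3, 0x1234);

	printf("type=%u offset=%#llx\n", swp_type(entry),
	       (unsigned long long)swp_offset(entry));	/* type=3 offset=0x1234 */
	return 0;
}

Storing the offset inverted keeps the physical-address bits of a non-present pte pointing at high, unpopulated memory, which is part of the L1TF mitigation.
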
@@ -232,17 +235,14 @@
 
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
 #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val((pmd)) })
-#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
-#define __swp_entry_to_pmd(x) ((pmd_t) { .pmd = (x).val })
+#define __swp_entry_to_pte(x) (__pte((x).val))
+#define __swp_entry_to_pmd(x) (__pmd((x).val))
 
 extern int kern_addr_valid(unsigned long addr);
 extern void cleanup_highmap(void);
 
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-
-#define pgtable_cache_init() do { } while (0)
-#define check_pgt_cache() do { } while (0)
 
 #define PAGE_AGP PAGE_KERNEL_NOCACHE
 #define HAVE_PAGE_AGP 1
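
For reference, __pte() and __pmd() are the kernel's canonical pte/pmd constructors; with CONFIG_PARAVIRT they can transform the raw value, so funneling __swp_entry_to_pte()/__swp_entry_to_pmd() through them keeps swap-entry conversion on the same path as every other pte construction. A toy standalone model of that shape (the types and value here are invented for the sketch):

#include <stdio.h>

/* Toy versions of the kernel types, for illustration only. */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long val; } swp_entry_t;

/* Stand-in for the kernel's __pte(); the real one is the single
 * canonical constructor and, under paravirt, may rewrite the raw
 * value before it reaches the page tables. */
static inline pte_t __pte(unsigned long val)
{
	return (pte_t) { .pte = val };
}

#define __swp_entry_to_pte(x)	(__pte((x).val))

int main(void)
{
	swp_entry_t entry = { .val = 0x1234 };
	pte_t pte = __swp_entry_to_pte(entry);

	printf("pte = %#lx\n", pte.pte);
	return 0;
}
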
@@ -259,15 +259,8 @@
 extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
 
 #define gup_fast_permitted gup_fast_permitted
-static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
-		int write)
+static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
 {
-	unsigned long len, end;
-
-	len = (unsigned long)nr_pages << PAGE_SHIFT;
-	end = start + len;
-	if (end < start)
-		return false;
 	if (end >> __VIRTUAL_MASK_SHIFT)
 		return false;
 	return true;
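
With 4-level paging, __VIRTUAL_MASK_SHIFT is 47 (56 with 5-level), so the remaining test rejects exactly those ranges that end at or above the 2^47 user-space boundary. A standalone sketch of the same arithmetic (constant and function names are stand-ins invented for this example, targeting 64-bit hosts):

#include <stdbool.h>
#include <stdio.h>

#define VIRTUAL_MASK_SHIFT	47	/* 4-level paging; 56 with 5-level */

/* end is exclusive; any range reaching 1 << 47 or above is rejected.
 * start is unused here: the common GUP code validates the range,
 * including overflow, before this check runs. */
static bool range_permitted(unsigned long start, unsigned long end)
{
	(void)start;
	return (end >> VIRTUAL_MASK_SHIFT) == 0;
}

int main(void)
{
	printf("%d\n", range_permitted(0x7f0000000000UL, 0x7f0000001000UL)); /* 1 */
	printf("%d\n", range_permitted(0x7f0000000000UL, 1UL << 47));        /* 0 */
	return 0;
}
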