@@ -53,6 +53,12 @@
 
 struct mm_struct;
 
+#define mm_p4d_folded mm_p4d_folded
+static inline bool mm_p4d_folded(struct mm_struct *mm)
+{
+	return !pgtable_l5_enabled();
+}
+
 void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
 
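For context on the hunk above: defining `mm_p4d_folded` to itself before the `static inline` lets generic code detect the architecture override with the preprocessor while still calling a typed function, and on x86-64 whether the P4D level is folded depends only on whether 5-level paging is enabled. Below is a minimal userspace sketch of that define-then-override idiom; everything other than the `mm_p4d_folded` name is an illustrative stand-in, not kernel code.

```c
/*
 * Illustrative userspace sketch, not kernel code: the define-then-override
 * idiom used by the hunk above.  l5_enabled stands in for
 * pgtable_l5_enabled(); struct mm_struct is a dummy.
 */
#include <stdbool.h>
#include <stdio.h>

static bool l5_enabled;                  /* stand-in for pgtable_l5_enabled() */
struct mm_struct { int dummy; };

/* "Architecture" override: P4D is folded whenever 5-level paging is off. */
#define mm_p4d_folded mm_p4d_folded
static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	(void)mm;
	return !l5_enabled;
}

/* A generic header could fall back to a default when no override exists. */
#ifndef mm_p4d_folded
#define mm_p4d_folded(mm) true
#endif

int main(void)
{
	struct mm_struct mm = { 0 };

	l5_enabled = false;
	printf("p4d folded: %d\n", mm_p4d_folded(&mm));	/* prints 1 */
	l5_enabled = true;
	printf("p4d folded: %d\n", mm_p4d_folded(&mm));	/* prints 0 */
	return 0;
}
```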
@@ -162,34 +168,25 @@
 	native_set_pgd(pgd, native_make_pgd(0));
 }
 
-extern void sync_global_pgds(unsigned long start, unsigned long end);
-
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
 
-/*
- * Level 4 access.
- */
-#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
+/* PGD - Level 4 access */
 
-/* PUD - Level3 access */
+/* PUD - Level 3 access */
 
-/* PMD  - Level 2 access */
+/* PMD - Level 2 access */
 
-/* PTE - Level 1 access. */
-
-/* x86-64 always has all page tables mapped. */
-#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_unmap(pte) ((void)(pte))/* NOP */
+/* PTE - Level 1 access */
 
 /*
  * Encode and de-code a swap entry
  *
  * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
  * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
- * | TYPE (59-63) | ~OFFSET (9-58)  |0|0|X|X| X| X|X|SD|0| <- swp entry
+ * | TYPE (59-63) | ~OFFSET (9-58)  |0|0|X|X| X| X|F|SD|0| <- swp entry
  *
  * G (8) is aliased and used as a PROT_NONE indicator for
  * !present ptes. We need to start storing swap entries above
@@ -197,8 +194,14 @@
  * erratum where they can be incorrectly set by hardware on
  * non-present PTEs.
  *
+ * SD Bits 1-4 are not used in non-present format and available for
+ * special use described below:
+ *
  * SD (1) in swp entry is used to store soft dirty bit, which helps us
  * remember soft dirty over page migration
+ *
+ * F (2) in swp entry is used to record when a pagetable is
+ * writeprotected by userfaultfd WP support.
  *
  * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
  * but also L and G.
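The comment block above documents the x86-64 non-present (swap) PTE layout: type in bits 59-63, offset in bits 9-58 (stored inverted as `~OFFSET` in the real format), the soft-dirty marker `SD` in bit 1, the userfaultfd write-protect marker `F` in bit 2, and the present bit 0 clear. A standalone sketch of that documented layout follows; it skips the offset inversion and uses local helper names, not the kernel's `__swp_entry()` family.

```c
/*
 * Standalone illustration of the swap-PTE layout documented above:
 * type in bits 59-63, offset in bits 9-58, soft-dirty ("SD") in bit 1,
 * uffd-wp ("F") in bit 2, present bit 0 clear.  The real format stores
 * the offset inverted (~OFFSET); that detail is skipped here, and these
 * helpers are local to the example, not the kernel's swp macros.
 */
#include <assert.h>
#include <stdint.h>

#define EX_TYPE_SHIFT   59
#define EX_OFFSET_SHIFT  9
#define EX_OFFSET_BITS  50                    /* bits 9..58 */
#define EX_OFFSET_MASK  ((1ULL << EX_OFFSET_BITS) - 1)
#define EX_SOFT_DIRTY   (1ULL << 1)           /* "SD" column */
#define EX_UFFD_WP      (1ULL << 2)           /* "F" column */

static uint64_t ex_mk_swap_pte(uint64_t type, uint64_t offset)
{
	return (type << EX_TYPE_SHIFT) |
	       ((offset & EX_OFFSET_MASK) << EX_OFFSET_SHIFT);
}

int main(void)
{
	uint64_t pte = ex_mk_swap_pte(3, 0x12345) | EX_SOFT_DIRTY | EX_UFFD_WP;

	assert((pte & 1) == 0);                             /* not present */
	assert((pte >> EX_TYPE_SHIFT) == 3);                /* type round-trips */
	assert(((pte >> EX_OFFSET_SHIFT) & EX_OFFSET_MASK) == 0x12345);
	assert(pte & EX_SOFT_DIRTY);                        /* soft-dirty kept */
	assert(pte & EX_UFFD_WP);                           /* uffd-wp kept */
	return 0;
}
```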
@@ -232,17 +235,14 @@
 
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
 #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val((pmd)) })
-#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
-#define __swp_entry_to_pmd(x) ((pmd_t) { .pmd = (x).val })
+#define __swp_entry_to_pte(x) (__pte((x).val))
+#define __swp_entry_to_pmd(x) (__pmd((x).val))
 
 extern int kern_addr_valid(unsigned long addr);
 extern void cleanup_highmap(void);
 
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-
-#define pgtable_cache_init() do { } while (0)
-#define check_pgt_cache() do { } while (0)
 
 #define PAGE_AGP PAGE_KERNEL_NOCACHE
 #define HAVE_PAGE_AGP 1
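The hunk above switches `__swp_entry_to_pte()`/`__swp_entry_to_pmd()` from open-coded designated initializers to the `__pte()`/`__pmd()` constructors. A small userspace sketch with mocked `pte_t`/`swp_entry_t` types shows the round-trip both forms are expected to preserve; all definitions in it are local mocks, not the kernel ones.

```c
/*
 * Userspace sketch with mocked types: the new __swp_entry_to_pte() form
 * builds the pte through a __pte()-style constructor instead of naming
 * the pte_t member directly; both forms produce the same bits.  All
 * definitions below are local mocks, not the kernel ones.
 */
#include <assert.h>
#include <stdint.h>

typedef struct { uint64_t pte; } pte_t;
typedef struct { uint64_t val; } swp_entry_t;

static inline pte_t __pte(uint64_t val)   { return (pte_t){ .pte = val }; }
static inline uint64_t pte_val(pte_t pte) { return pte.pte; }

#define __pte_to_swp_entry(pte)   ((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)     (__pte((x).val))              /* new form */
#define __swp_entry_to_pte_old(x) ((pte_t) { .pte = (x).val })  /* old form */

int main(void)
{
	pte_t pte = __pte(0xdeadbeefULL << 9);
	swp_entry_t entry = __pte_to_swp_entry(pte);

	/* Round-trip preserves the raw value, and both forms agree. */
	assert(pte_val(__swp_entry_to_pte(entry)) == pte_val(pte));
	assert(pte_val(__swp_entry_to_pte_old(entry)) == pte_val(pte));
	return 0;
}
```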
@@ -259,15 +259,8 @@
 extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
 
 #define gup_fast_permitted gup_fast_permitted
-static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
-		int write)
+static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
 {
-	unsigned long len, end;
-
-	len = (unsigned long)nr_pages << PAGE_SHIFT;
-	end = start + len;
-	if (end < start)
-		return false;
 	if (end >> __VIRTUAL_MASK_SHIFT)
 		return false;
 	return true;
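With the new signature, `gup_fast_permitted()` receives a precomputed `[start, end)` range and only has to reject ranges that reach at or above the user virtual address width; the length and overflow arithmetic it used to do is gone from this helper. Below is a userspace sketch of that remaining check, using an illustrative 47-bit width and 4 KiB page size rather than values from a particular kernel configuration.

```c
/*
 * Userspace sketch of the range check kept in gup_fast_permitted()
 * above: with [start, end) precomputed by the caller, the helper only
 * rejects ranges reaching at or above the user virtual address width.
 * The 47-bit width and 4 KiB page size are illustrative values, not
 * taken from a particular kernel configuration.
 */
#include <assert.h>
#include <stdbool.h>

#define EX_PAGE_SHIFT          12
#define EX_VIRTUAL_MASK_SHIFT  47	/* user address width on 4-level x86-64 */

static bool ex_gup_fast_permitted(unsigned long start, unsigned long end)
{
	(void)start;
	/* Any bit at or above the user address width means the range
	 * would reach past user space. */
	if (end >> EX_VIRTUAL_MASK_SHIFT)
		return false;
	return true;
}

int main(void)
{
	unsigned long start = 0x7f0000000000UL;
	unsigned long nr_pages = 4;
	unsigned long end = start + (nr_pages << EX_PAGE_SHIFT);

	assert(ex_gup_fast_permitted(start, end));

	/* A range that ends right at the 47-bit boundary is rejected. */
	start = (1UL << EX_VIRTUAL_MASK_SHIFT) - (1UL << EX_PAGE_SHIFT);
	end = 1UL << EX_VIRTUAL_MASK_SHIFT;
	assert(!ex_gup_fast_permitted(start, end));
	return 0;
}
```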