hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/alpha/include/asm/pgtable.h
@@ -2,7 +2,7 @@
 #ifndef _ALPHA_PGTABLE_H
 #define _ALPHA_PGTABLE_H
 
-#include <asm-generic/4level-fixup.h>
+#include <asm-generic/pgtable-nopud.h>
 
 /*
  * This file contains the functions and defines necessary to modify and use
@@ -226,8 +226,8 @@
 extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
 { pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
 
-extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
-{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
+extern inline void pud_set(pud_t * pudp, pmd_t * pmdp)
+{ pud_val(*pudp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
 
 
 extern inline unsigned long
@@ -238,11 +238,11 @@
 
 #ifndef CONFIG_DISCONTIGMEM
 #define pmd_page(pmd)	(mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32))
-#define pgd_page(pgd)	(mem_map + ((pgd_val(pgd) & _PFN_MASK) >> 32))
+#define pud_page(pud)	(mem_map + ((pud_val(pud) & _PFN_MASK) >> 32))
 #endif
 
-extern inline unsigned long pgd_page_vaddr(pgd_t pgd)
-{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
+extern inline unsigned long pud_page_vaddr(pud_t pgd)
+{ return PAGE_OFFSET + ((pud_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
 
 extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
 extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_VALID; }
@@ -256,10 +256,10 @@
 extern inline int pmd_present(pmd_t pmd)	{ return pmd_val(pmd) & _PAGE_VALID; }
 extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }
 
-extern inline int pgd_none(pgd_t pgd)		{ return !pgd_val(pgd); }
-extern inline int pgd_bad(pgd_t pgd)		{ return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; }
-extern inline int pgd_present(pgd_t pgd)	{ return pgd_val(pgd) & _PAGE_VALID; }
-extern inline void pgd_clear(pgd_t * pgdp)	{ pgd_val(*pgdp) = 0; }
+extern inline int pud_none(pud_t pud)		{ return !pud_val(pud); }
+extern inline int pud_bad(pud_t pud)		{ return (pud_val(pud) & ~_PFN_MASK) != _PAGE_TABLE; }
+extern inline int pud_present(pud_t pud)	{ return pud_val(pud) & _PAGE_VALID; }
+extern inline void pud_clear(pud_t * pudp)	{ pud_val(*pudp) = 0; }
 
 /*
  * The following only work if pte_present() is true.
@@ -268,7 +268,6 @@
 extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOW); }
 extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
 extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
-extern inline int pte_special(pte_t pte)	{ return 0; }
 
 extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOW; return pte; }
 extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
@@ -276,21 +275,11 @@
 extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOW; return pte; }
 extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= __DIRTY_BITS; return pte; }
 extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }
-extern inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
-
-#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
-
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))
 
 /*
- * The smp_read_barrier_depends() in the following functions are required to
- * order the load of *dir (the pointer in the top level page table) with any
- * subsequent load of the returned pmd_t *ret (ret is data dependent on *dir).
+ * The smp_rmb() in the following functions are required to order the load of
+ * *dir (the pointer in the top level page table) with any subsequent load of
+ * the returned pmd_t *ret (ret is data dependent on *dir).
  *
  * If this ordering is not enforced, the CPU might load an older value of
  * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
@@ -301,24 +290,23 @@
  */
 
 /* Find an entry in the second-level page table.. */
-extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
 {
-	pmd_t *ret = (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
-	smp_read_barrier_depends(); /* see above */
+	pmd_t *ret = (pmd_t *) pud_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
+	smp_rmb(); /* see above */
 	return ret;
 }
+#define pmd_offset pmd_offset
 
 /* Find an entry in the third-level page table.. */
 extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
 {
 	pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
 		+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
-	smp_read_barrier_depends(); /* see above */
+	smp_rmb(); /* see above */
 	return ret;
 }
-
-#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
-#define pte_unmap(pte)	do { } while (0)
+#define pte_offset_kernel pte_offset_kernel
 
 extern pgd_t swapper_pg_dir[1024];
 
@@ -356,13 +344,6 @@
 	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
 
 extern void paging_init(void);
-
-#include <asm-generic/pgtable.h>
-
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
 
 /* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
 #define HAVE_ARCH_UNMAPPED_AREA