hc
2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/arch/m68k/include/asm/motorola_pgtable.h
....@@ -23,7 +23,18 @@
2323 #define _DESCTYPE_MASK 0x003
2424
2525 #define _CACHEMASK040 (~0x060)
26
-#define _TABLE_MASK (0xfffffe00)
26
+
27
+/*
28
+ * Currently set to the minimum alignment of table pointers (256 bytes).
29
+ * The hardware only uses the low 4 bits for state:
30
+ *
31
+ * 3 - Used
32
+ * 2 - Write Protected
33
+ * 0,1 - Descriptor Type
34
+ *
35
+ * and has the rest of the bits reserved.
36
+ */
37
+#define _TABLE_MASK (0xffffff00)
2738
2839 #define _PAGE_TABLE (_PAGE_SHORT)
2940 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)
....@@ -108,23 +119,17 @@
108119
109120 static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
110121 {
111
- unsigned long ptbl = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
112
- unsigned long *ptr = pmdp->pmd;
113
- short i = 16;
114
- while (--i >= 0) {
115
- *ptr++ = ptbl;
116
- ptbl += (sizeof(pte_t)*PTRS_PER_PTE/16);
117
- }
122
+ pmd_val(*pmdp) = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
118123 }
119124
120
-static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
125
+static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
121126 {
122
- pgd_val(*pgdp) = _PAGE_TABLE | _PAGE_ACCESSED | __pa(pmdp);
127
+ pud_val(*pudp) = _PAGE_TABLE | _PAGE_ACCESSED | __pa(pmdp);
123128 }
124129
125130 #define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK))
126
-#define __pmd_page(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
127
-#define __pgd_page(pgd) ((unsigned long)__va(pgd_val(pgd) & _TABLE_MASK))
131
+#define pmd_page_vaddr(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
132
+#define pud_pgtable(pud) ((pmd_t *)__va(pud_val(pud) & _TABLE_MASK))
128133
129134
130135 #define pte_none(pte) (!pte_val(pte))
....@@ -138,20 +143,21 @@
138143 #define pmd_none(pmd) (!pmd_val(pmd))
139144 #define pmd_bad(pmd) ((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
140145 #define pmd_present(pmd) (pmd_val(pmd) & _PAGE_TABLE)
141
-#define pmd_clear(pmdp) ({ \
142
- unsigned long *__ptr = pmdp->pmd; \
143
- short __i = 16; \
144
- while (--__i >= 0) \
145
- *__ptr++ = 0; \
146
-})
147
-#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
146
+#define pmd_clear(pmdp) ({ pmd_val(*pmdp) = 0; })
147
+
148
+/*
149
+ * m68k does not have huge pages (020/030 actually could), but generic code
150
 + * expects pmd_page() to exist, only to then DCE it all. Provide a dummy to
151
+ * make the compiler happy.
152
+ */
153
+#define pmd_page(pmd) NULL
148154
149155
150
-#define pgd_none(pgd) (!pgd_val(pgd))
151
-#define pgd_bad(pgd) ((pgd_val(pgd) & _DESCTYPE_MASK) != _PAGE_TABLE)
152
-#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_TABLE)
153
-#define pgd_clear(pgdp) ({ pgd_val(*pgdp) = 0; })
154
-#define pgd_page(pgd) (mem_map + ((unsigned long)(__va(pgd_val(pgd)) - PAGE_OFFSET) >> PAGE_SHIFT))
156
+#define pud_none(pud) (!pud_val(pud))
157
+#define pud_bad(pud) ((pud_val(pud) & _DESCTYPE_MASK) != _PAGE_TABLE)
158
+#define pud_present(pud) (pud_val(pud) & _PAGE_TABLE)
159
+#define pud_clear(pudp) ({ pud_val(*pudp) = 0; })
160
+#define pud_page(pud) (mem_map + ((unsigned long)(__va(pud_val(pud)) - PAGE_OFFSET) >> PAGE_SHIFT))
155161
156162 #define pte_ERROR(e) \
157163 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
....@@ -168,7 +174,6 @@
168174 static inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_RONLY); }
169175 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
170176 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
171
-static inline int pte_special(pte_t pte) { return 0; }
172177
173178 static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_RONLY; return pte; }
174179 static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
....@@ -186,84 +191,9 @@
186191 pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_supervisor_cachemode;
187192 return pte;
188193 }
189
-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
190
-
191
-#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
192
-
193
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
194
-
195
-/* to find an entry in a page-table-directory */
196
-static inline pgd_t *pgd_offset(const struct mm_struct *mm,
197
- unsigned long address)
198
-{
199
- return mm->pgd + pgd_index(address);
200
-}
201194
202195 #define swapper_pg_dir kernel_pg_dir
203196 extern pgd_t kernel_pg_dir[128];
204
-
205
-static inline pgd_t *pgd_offset_k(unsigned long address)
206
-{
207
- return kernel_pg_dir + (address >> PGDIR_SHIFT);
208
-}
209
-
210
-
211
-/* Find an entry in the second-level page table.. */
212
-static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
213
-{
214
- return (pmd_t *)__pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1));
215
-}
216
-
217
-/* Find an entry in the third-level page table.. */
218
-static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
219
-{
220
- return (pte_t *)__pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
221
-}
222
-
223
-#define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
224
-#define pte_unmap(pte) ((void)0)
225
-
226
-/*
227
- * Allocate and free page tables. The xxx_kernel() versions are
228
- * used to allocate a kernel page table - this turns on ASN bits
229
- * if any.
230
- */
231
-
232
-/* Prior to calling these routines, the page should have been flushed
233
- * from both the cache and ATC, or the CPU might not notice that the
234
- * cache setting for the page has been changed. -jskov
235
- */
236
-static inline void nocache_page(void *vaddr)
237
-{
238
- unsigned long addr = (unsigned long)vaddr;
239
-
240
- if (CPU_IS_040_OR_060) {
241
- pgd_t *dir;
242
- pmd_t *pmdp;
243
- pte_t *ptep;
244
-
245
- dir = pgd_offset_k(addr);
246
- pmdp = pmd_offset(dir, addr);
247
- ptep = pte_offset_kernel(pmdp, addr);
248
- *ptep = pte_mknocache(*ptep);
249
- }
250
-}
251
-
252
-static inline void cache_page(void *vaddr)
253
-{
254
- unsigned long addr = (unsigned long)vaddr;
255
-
256
- if (CPU_IS_040_OR_060) {
257
- pgd_t *dir;
258
- pmd_t *pmdp;
259
- pte_t *ptep;
260
-
261
- dir = pgd_offset_k(addr);
262
- pmdp = pmd_offset(dir, addr);
263
- ptep = pte_offset_kernel(pmdp, addr);
264
- *ptep = pte_mkcache(*ptep);
265
- }
266
-}
267197
268198 /* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
269199 #define __swp_type(x) (((x).val >> 4) & 0xff)