hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/m68k/mm/motorola.c
@@ -18,7 +18,6 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
 #include <linux/memblock.h>
 #include <linux/gfp.h>
 
@@ -46,31 +45,210 @@
 EXPORT_SYMBOL(mm_cachebits);
 #endif
 
+/* Prior to calling these routines, the page should have been flushed
+ * from both the cache and ATC, or the CPU might not notice that the
+ * cache setting for the page has been changed. -jskov
+ */
+static inline void nocache_page(void *vaddr)
+{
+	unsigned long addr = (unsigned long)vaddr;
+
+	if (CPU_IS_040_OR_060) {
+		pte_t *ptep = virt_to_kpte(addr);
+
+		*ptep = pte_mknocache(*ptep);
+	}
+}
+
+static inline void cache_page(void *vaddr)
+{
+	unsigned long addr = (unsigned long)vaddr;
+
+	if (CPU_IS_040_OR_060) {
+		pte_t *ptep = virt_to_kpte(addr);
+
+		*ptep = pte_mkcache(*ptep);
+	}
+}
+
+/*
+ * Motorola 680x0 user's manual recommends using uncached memory for address
+ * translation tables.
+ *
+ * Seeing how the MMU can be external on (some of) these chips, that seems like
+ * a very important recommendation to follow. Provide some helpers to combat
+ * 'variation' amongst the users of this.
+ */
+
+void mmu_page_ctor(void *page)
+{
+	__flush_page_to_ram(page);
+	flush_tlb_kernel_page(page);
+	nocache_page(page);
+}
+
+void mmu_page_dtor(void *page)
+{
+	cache_page(page);
+}
+
+/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
+   struct page instead of separately kmalloced struct. Stolen from
+   arch/sparc/mm/srmmu.c ... */
+
+typedef struct list_head ptable_desc;
+
+static struct list_head ptable_list[2] = {
+	LIST_HEAD_INIT(ptable_list[0]),
+	LIST_HEAD_INIT(ptable_list[1]),
+};
+
+#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
+#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
+#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
+
+static const int ptable_shift[2] = {
+	7+2, /* PGD, PMD */
+	6+2, /* PTE */
+};
+
+#define ptable_size(type) (1U << ptable_shift[type])
+#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
+
+void __init init_pointer_table(void *table, int type)
+{
+	ptable_desc *dp;
+	unsigned long ptable = (unsigned long)table;
+	unsigned long page = ptable & PAGE_MASK;
+	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));
+
+	dp = PD_PTABLE(page);
+	if (!(PD_MARKBITS(dp) & mask)) {
+		PD_MARKBITS(dp) = ptable_mask(type);
+		list_add(dp, &ptable_list[type]);
+	}
+
+	PD_MARKBITS(dp) &= ~mask;
+	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
+
+	/* unreserve the page so it's possible to free that page */
+	__ClearPageReserved(PD_PAGE(dp));
+	init_page_count(PD_PAGE(dp));
+
+	return;
+}
+
+void *get_pointer_table(int type)
+{
+	ptable_desc *dp = ptable_list[type].next;
+	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
+	unsigned int tmp, off;
+
+	/*
+	 * For a pointer table for a user process address space, a
+	 * table is taken from a page allocated for the purpose. Each
+	 * page can hold 8 pointer tables. The page is remapped in
+	 * virtual address space to be noncacheable.
+	 */
+	if (mask == 0) {
+		void *page;
+		ptable_desc *new;
+
+		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
+			return NULL;
+
+		if (type == TABLE_PTE) {
+			/*
+			 * m68k doesn't have SPLIT_PTE_PTLOCKS for not having
+			 * SMP.
+			 */
+			pgtable_pte_page_ctor(virt_to_page(page));
+		}
+
+		mmu_page_ctor(page);
+
+		new = PD_PTABLE(page);
+		PD_MARKBITS(new) = ptable_mask(type) - 1;
+		list_add_tail(new, dp);
+
+		return (pmd_t *)page;
+	}
+
+	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
+		;
+	PD_MARKBITS(dp) = mask & ~tmp;
+	if (!PD_MARKBITS(dp)) {
+		/* move to end of list */
+		list_move_tail(dp, &ptable_list[type]);
+	}
+	return page_address(PD_PAGE(dp)) + off;
+}
+
+int free_pointer_table(void *table, int type)
+{
+	ptable_desc *dp;
+	unsigned long ptable = (unsigned long)table;
+	unsigned long page = ptable & PAGE_MASK;
+	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));
+
+	dp = PD_PTABLE(page);
+	if (PD_MARKBITS (dp) & mask)
+		panic ("table already free!");
+
+	PD_MARKBITS (dp) |= mask;
+
+	if (PD_MARKBITS(dp) == ptable_mask(type)) {
+		/* all tables in page are free, free page */
+		list_del(dp);
+		mmu_page_dtor((void *)page);
+		if (type == TABLE_PTE)
+			pgtable_pte_page_dtor(virt_to_page(page));
+		free_page (page);
+		return 1;
+	} else if (ptable_list[type].next != dp) {
+		/*
+		 * move this descriptor to the front of the list, since
+		 * it has one or more free tables.
+		 */
+		list_move(dp, &ptable_list[type]);
+	}
+	return 0;
+}
+
 /* size of memory already mapped in head.S */
 extern __initdata unsigned long m68k_init_mapped_size;
 
 extern unsigned long availmem;
 
+static pte_t *last_pte_table __initdata = NULL;
+
 static pte_t * __init kernel_page_table(void)
 {
-	pte_t *ptablep;
+	pte_t *pte_table = last_pte_table;
 
-	ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+	if (PAGE_ALIGNED(last_pte_table)) {
+		pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+		if (!pte_table) {
+			panic("%s: Failed to allocate %lu bytes align=%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
+		}
 
-	clear_page(ptablep);
-	__flush_page_to_ram(ptablep);
-	flush_tlb_kernel_page(ptablep);
-	nocache_page(ptablep);
+		clear_page(pte_table);
+		mmu_page_ctor(pte_table);
 
-	return ptablep;
+		last_pte_table = pte_table;
+	}
+
+	last_pte_table += PTRS_PER_PTE;
+
+	return pte_table;
 }
 
-static pmd_t *last_pgtable __initdata = NULL;
-pmd_t *zero_pgtable __initdata = NULL;
+static pmd_t *last_pmd_table __initdata = NULL;
 
 static pmd_t * __init kernel_ptr_table(void)
 {
-	if (!last_pgtable) {
+	if (!last_pmd_table) {
 		unsigned long pmd, last;
 		int i;
 
@@ -80,38 +258,41 @@
 		 */
 		last = (unsigned long)kernel_pg_dir;
 		for (i = 0; i < PTRS_PER_PGD; i++) {
-			if (!pgd_present(kernel_pg_dir[i]))
+			pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);
+
+			if (!pud_present(*pud))
 				continue;
-			pmd = __pgd_page(kernel_pg_dir[i]);
+			pmd = pgd_page_vaddr(kernel_pg_dir[i]);
 			if (pmd > last)
 				last = pmd;
 		}
 
-		last_pgtable = (pmd_t *)last;
+		last_pmd_table = (pmd_t *)last;
 #ifdef DEBUG
-		printk("kernel_ptr_init: %p\n", last_pgtable);
+		printk("kernel_ptr_init: %p\n", last_pmd_table);
 #endif
 	}
 
-	last_pgtable += PTRS_PER_PMD;
-	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
-		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+	last_pmd_table += PTRS_PER_PMD;
+	if (PAGE_ALIGNED(last_pmd_table)) {
+		last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+		if (!last_pmd_table)
+			panic("%s: Failed to allocate %lu bytes align=%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
 
-		clear_page(last_pgtable);
-		__flush_page_to_ram(last_pgtable);
-		flush_tlb_kernel_page(last_pgtable);
-		nocache_page(last_pgtable);
+		clear_page(last_pmd_table);
+		mmu_page_ctor(last_pmd_table);
 	}
 
-	return last_pgtable;
+	return last_pmd_table;
 }
 
 static void __init map_node(int node)
 {
-#define PTRTREESIZE (256*1024)
-#define ROOTTREESIZE (32*1024*1024)
 	unsigned long physaddr, virtaddr, size;
 	pgd_t *pgd_dir;
+	p4d_t *p4d_dir;
+	pud_t *pud_dir;
 	pmd_t *pmd_dir;
 	pte_t *pte_dir;
 
@@ -125,56 +306,57 @@
 
 	while (size > 0) {
 #ifdef DEBUG
-		if (!(virtaddr & (PTRTREESIZE-1)))
+		if (!(virtaddr & (PMD_SIZE-1)))
 			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
 				virtaddr);
 #endif
 		pgd_dir = pgd_offset_k(virtaddr);
 		if (virtaddr && CPU_IS_020_OR_030) {
-			if (!(virtaddr & (ROOTTREESIZE-1)) &&
-			    size >= ROOTTREESIZE) {
+			if (!(virtaddr & (PGDIR_SIZE-1)) &&
+			    size >= PGDIR_SIZE) {
 #ifdef DEBUG
 				printk ("[very early term]");
 #endif
 				pgd_val(*pgd_dir) = physaddr;
-				size -= ROOTTREESIZE;
-				virtaddr += ROOTTREESIZE;
-				physaddr += ROOTTREESIZE;
+				size -= PGDIR_SIZE;
+				virtaddr += PGDIR_SIZE;
+				physaddr += PGDIR_SIZE;
 				continue;
 			}
 		}
-		if (!pgd_present(*pgd_dir)) {
+		p4d_dir = p4d_offset(pgd_dir, virtaddr);
+		pud_dir = pud_offset(p4d_dir, virtaddr);
+		if (!pud_present(*pud_dir)) {
 			pmd_dir = kernel_ptr_table();
 #ifdef DEBUG
 			printk ("[new pointer %p]", pmd_dir);
 #endif
-			pgd_set(pgd_dir, pmd_dir);
+			pud_set(pud_dir, pmd_dir);
 		} else
-			pmd_dir = pmd_offset(pgd_dir, virtaddr);
+			pmd_dir = pmd_offset(pud_dir, virtaddr);
 
 		if (CPU_IS_020_OR_030) {
 			if (virtaddr) {
 #ifdef DEBUG
 				printk ("[early term]");
 #endif
-				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
-				physaddr += PTRTREESIZE;
+				pmd_val(*pmd_dir) = physaddr;
+				physaddr += PMD_SIZE;
 			} else {
 				int i;
 #ifdef DEBUG
 				printk ("[zero map]");
 #endif
-				zero_pgtable = kernel_ptr_table();
-				pte_dir = (pte_t *)zero_pgtable;
-				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
-					_PAGE_TABLE | _PAGE_ACCESSED;
+				pte_dir = kernel_page_table();
+				pmd_set(pmd_dir, pte_dir);
+
 				pte_val(*pte_dir++) = 0;
 				physaddr += PAGE_SIZE;
-				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
+				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
 					pte_val(*pte_dir++) = physaddr;
 			}
-			size -= PTRTREESIZE;
-			virtaddr += PTRTREESIZE;
+			size -= PMD_SIZE;
+			virtaddr += PMD_SIZE;
 		} else {
 			if (!pmd_present(*pmd_dir)) {
 #ifdef DEBUG
@@ -207,7 +389,7 @@
  */
 void __init paging_init(void)
 {
-	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
 	unsigned long min_addr, max_addr;
 	unsigned long addr;
 	int i;
@@ -228,7 +410,7 @@
 
 	min_addr = m68k_memory[0].addr;
 	max_addr = min_addr + m68k_memory[0].size;
-	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
 	for (i = 1; i < m68k_num_memory;) {
 		if (m68k_memory[i].addr < min_addr) {
 			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
@@ -239,7 +421,7 @@
 				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
 			continue;
 		}
-		memblock_add(m68k_memory[i].addr, m68k_memory[i].size);
+		memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i);
 		addr = m68k_memory[i].addr + m68k_memory[i].size;
 		if (addr > max_addr)
 			max_addr = addr;
@@ -277,7 +459,10 @@
 	 * initialize the bad page table and bad page to point
 	 * to a couple of allocated pages
 	 */
-	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	/*
 	 * Set up SFC/DFC registers
@@ -287,12 +472,10 @@
 #ifdef DEBUG
 	printk ("before free_area_init\n");
 #endif
-	for (i = 0; i < m68k_num_memory; i++) {
-		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
-		free_area_init_node(i, zones_size,
-				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
+	for (i = 0; i < m68k_num_memory; i++)
 		if (node_present_pages(i))
 			node_set_state(i, N_NORMAL_MEMORY);
-	}
-}
 
+	max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
+	free_area_init(max_zone_pfn);
+}
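
Illustrative note, not part of the patch: the two-slot allocator added above (get_pointer_table()/free_pointer_table() with the ptable_shift[] sizing) is meant to back the per-process pgalloc helpers. A minimal sketch of a caller, assuming the TABLE_PGD/TABLE_PMD/TABLE_PTE index names implied by the "PGD, PMD" / "PTE" comments; the wrapper names and the enum below are assumptions for illustration only:

/*
 * Sketch only: index 0 serves the 512-byte PGD/PMD pointer tables and
 * index 1 the 256-byte PTE tables (see ptable_shift[] above), so one
 * 4 KiB page carries 8 or 16 tables respectively. Enum and wrapper
 * names are hypothetical, not taken from this patch.
 */
enum { TABLE_PGD = 0, TABLE_PMD = 0, TABLE_PTE = 1 };

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	/* Carves one slot out of the current page; allocates and
	 * uncaches a fresh page via mmu_page_ctor() when none is free. */
	return get_pointer_table(TABLE_PGD);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	/* The backing page is re-cached and released only once every
	 * slot in it has been returned. */
	free_pointer_table(pgd, TABLE_PGD);
}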