2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/arch/m68k/mm/memory.c
@@ -17,113 +17,9 @@
 #include <asm/setup.h>
 #include <asm/segment.h>
 #include <asm/page.h>
-#include <asm/pgalloc.h>
 #include <asm/traps.h>
 #include <asm/machdep.h>
 
-
-/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
-   struct page instead of separately kmalloced struct. Stolen from
-   arch/sparc/mm/srmmu.c ... */
-
-typedef struct list_head ptable_desc;
-static LIST_HEAD(ptable_list);
-
-#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
-#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
-#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)
-
-#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
-
-void __init init_pointer_table(unsigned long ptable)
-{
-	ptable_desc *dp;
-	unsigned long page = ptable & PAGE_MASK;
-	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);
-
-	dp = PD_PTABLE(page);
-	if (!(PD_MARKBITS(dp) & mask)) {
-		PD_MARKBITS(dp) = 0xff;
-		list_add(dp, &ptable_list);
-	}
-
-	PD_MARKBITS(dp) &= ~mask;
-	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
-
-	/* unreserve the page so it's possible to free that page */
-	PD_PAGE(dp)->flags &= ~(1 << PG_reserved);
-	init_page_count(PD_PAGE(dp));
-
-	return;
-}
-
-pmd_t *get_pointer_table (void)
-{
-	ptable_desc *dp = ptable_list.next;
-	unsigned char mask = PD_MARKBITS (dp);
-	unsigned char tmp;
-	unsigned int off;
-
-	/*
-	 * For a pointer table for a user process address space, a
-	 * table is taken from a page allocated for the purpose. Each
-	 * page can hold 8 pointer tables. The page is remapped in
-	 * virtual address space to be noncacheable.
-	 */
-	if (mask == 0) {
-		void *page;
-		ptable_desc *new;
-
-		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
-			return NULL;
-
-		flush_tlb_kernel_page(page);
-		nocache_page(page);
-
-		new = PD_PTABLE(page);
-		PD_MARKBITS(new) = 0xfe;
-		list_add_tail(new, dp);
-
-		return (pmd_t *)page;
-	}
-
-	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
-		;
-	PD_MARKBITS(dp) = mask & ~tmp;
-	if (!PD_MARKBITS(dp)) {
-		/* move to end of list */
-		list_move_tail(dp, &ptable_list);
-	}
-	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
-}
-
-int free_pointer_table (pmd_t *ptable)
-{
-	ptable_desc *dp;
-	unsigned long page = (unsigned long)ptable & PAGE_MASK;
-	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);
-
-	dp = PD_PTABLE(page);
-	if (PD_MARKBITS (dp) & mask)
-		panic ("table already free!");
-
-	PD_MARKBITS (dp) |= mask;
-
-	if (PD_MARKBITS(dp) == 0xff) {
-		/* all tables in page are free, free page */
-		list_del(dp);
-		cache_page((void *)page);
-		free_page (page);
-		return 1;
-	} else if (ptable_list.next != dp) {
-		/*
-		 * move this descriptor to the front of the list, since
-		 * it has one or more free tables.
-		 */
-		list_move(dp, &ptable_list);
-	}
-	return 0;
-}
 
 /* invalidate page in both caches */
 static inline void clear040(unsigned long paddr)
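
Note: the {get,free}_pointer_table helpers removed above pack eight pointer tables into a single page and track which of the eight slots are free in one byte of mark bits kept in the page's struct page (a set bit means the slot is free). A minimal, self-contained sketch of that slot-bitmap bookkeeping follows; it is plain C, illustrative only, and the names (PTABLES_PER_PAGE, take_slot, put_slot) are ours rather than anything in the kernel.

/*
 * Illustrative sketch only (not kernel code): the slot-bitmap scheme used
 * by the removed get/free_pointer_table. One byte tracks 8 table slots per
 * page; a set bit means the slot is free. All names here are hypothetical.
 */
#include <assert.h>
#include <stdio.h>

#define PTABLES_PER_PAGE 8

/* Take the first free slot: clear its bit and return its index, or -1 if full. */
static int take_slot(unsigned char *mask)
{
	for (int off = 0; off < PTABLES_PER_PAGE; off++) {
		unsigned char bit = 1u << off;
		if (*mask & bit) {
			*mask &= (unsigned char)~bit;
			return off;
		}
	}
	return -1;
}

/* Return a slot: set its bit; a resulting 0xff means the whole page is free again. */
static void put_slot(unsigned char *mask, int off)
{
	unsigned char bit = 1u << off;
	assert(!(*mask & bit));		/* analogous to the "table already free!" panic */
	*mask |= bit;
}

int main(void)
{
	unsigned char mask = 0xff;	/* fresh page: all 8 slots free */
	int a = take_slot(&mask);	/* 0; mask is now 0xfe, like PD_MARKBITS after a new page */
	int b = take_slot(&mask);	/* 1; mask is now 0xfc */

	put_slot(&mask, b);
	put_slot(&mask, a);
	printf("mask = %#x\n", mask);	/* 0xff again: the page could be freed */
	return 0;
}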