...
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/string.h>
-#include <linux/bootmem.h>
 #include <linux/memblock.h>

 #include <asm/setup.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 #include <asm/mcf_pgalloc.h>
 #include <asm/tlbflush.h>
+#include <asm/pgalloc.h>

 #define KMAPAREA(x) ((x >= VMALLOC_START) && (x < KMAP_END))

...
 	pte_t *pg_table;
 	unsigned long address, size;
 	unsigned long next_pgtable, bootmem_end;
-	unsigned long zones_size[MAX_NR_ZONES];
-	enum zone_type zone;
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
 	int i;

-	empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
-	memset((void *) empty_zero_page, 0, PAGE_SIZE);
+	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);

 	pg_dir = swapper_pg_dir;
 	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

 	size = num_pages * sizeof(pte_t);
 	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
-	next_pgtable = (unsigned long) alloc_bootmem_pages(size);
+	next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
+	if (!next_pgtable)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, size, PAGE_SIZE);

 	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
 	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
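The two early allocations above follow the standard bootmem-to-memblock conversion: memblock_alloc() returns memory that is already zeroed, so the explicit memset() of empty_zero_page is dropped, and it returns NULL on failure instead of panicking internally, so each call site now checks the result and panics itself. A minimal sketch of that pattern, using a hypothetical early_zeroed_alloc() helper that is not part of the patch:

static void * __init early_zeroed_alloc(unsigned long size)
{
	/* memblock_alloc() zeroes the block; align to a page boundary */
	void *p = memblock_alloc(size, PAGE_SIZE);

	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, (unsigned long)PAGE_SIZE);
	return p;
}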
...
 	}

 	current->mm = NULL;
-
-	for (zone = 0; zone < MAX_NR_ZONES; zone++)
-		zones_size[zone] = 0x0;
-	zones_size[ZONE_DMA] = num_pages;
-	free_area_init(zones_size);
+	max_zone_pfn[ZONE_DMA] = PFN_DOWN(_ramend);
+	free_area_init(max_zone_pfn);
 }
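free_area_init() now takes an array of maximum page-frame numbers, one entry per zone, instead of an array of zone sizes, so the loop that zeroed zones_size[] and the num_pages-based ZONE_DMA size collapse into a single assignment: ZONE_DMA is bounded by PFN_DOWN(_ramend) and every other entry stays zero, meaning an empty zone. A worked example with assumed numbers, not taken from the patch:

	/*
	 * Assume 64 MiB of RAM at physical address 0 and 4 KiB pages:
	 * _ramend == 0x04000000, so PFN_DOWN(_ramend) is
	 * 0x04000000 >> 12 == 0x4000. ZONE_DMA then covers page frames
	 * 0..0x3fff and the remaining zones are empty.
	 */
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	max_zone_pfn[ZONE_DMA] = PFN_DOWN(_ramend);
	free_area_init(max_zone_pfn);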

 int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
...
 	unsigned long flags, mmuar, mmutr;
 	struct mm_struct *mm;
 	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	int asid;
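The new p4d and pud pointers exist because the kernel's generic page-table accessors are written against a five-level hierarchy (pgd, p4d, pud, pmd, pte); ports with fewer hardware levels fold the unused ones through the asm-generic headers, so naming them in the walk adds no run-time cost. Roughly what a folded level looks like, abridged in the style of asm-generic/pgtable-nop4d.h and shown here only for illustration:

/*
 * A folded p4d level: the "p4d table" is the pgd entry itself, and
 * p4d_none() can never be true, so checks against it compile away.
 */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd;
}

static inline int p4d_none(p4d_t p4d)
{
	return 0;
}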
...
 		return -1;
 	}

-	pmd = pmd_offset(pgd, mmuar);
+	p4d = p4d_offset(pgd, mmuar);
+	if (p4d_none(*p4d)) {
+		local_irq_restore(flags);
+		return -1;
+	}
+
+	pud = pud_offset(p4d, mmuar);
+	if (pud_none(*pud)) {
+		local_irq_restore(flags);
+		return -1;
+	}
+
+	pmd = pmd_offset(pud, mmuar);
 	if (pmd_none(*pmd)) {
 		local_irq_restore(flags);
 		return -1;
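Functionally the walk is unchanged: p4d_none() and pud_none() are constant false for the folded levels, so the added checks cost nothing on this hardware. Written out as a stand-alone helper, the same descent through the generic five-level API looks roughly like this (a sketch, not code from the patch):

static pte_t *lookup_pte_example(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	/* kernel/identity mappings; user PTEs would go through pte_offset_map() */
	return pte_offset_kernel(pmd, addr);
}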
...

 /*
  * Steal a context from a task that has one at the moment.
- * This is only used on 8xx and 4xx and we presently assume that
- * they don't do SMP. If they do then thicfpgalloc.hs will have to check
- * whether the MM we steal is in use.
- * We also assume that this is only used on systems that don't
- * use an MMU hash table - this is true for 8xx and 4xx.
  * This isn't an LRU system, it just frees up each context in
  * turn (sort-of pseudo-random replacement :). This would be the
  * place to implement an LRU scheme if anyone was motivated to do it.
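The dropped lines referred to PowerPC 8xx/4xx, left over from the powerpc context-management code this comment was apparently copied from; what remains describes the actual scheme: contexts are reclaimed in simple round-robin order rather than by LRU. A rough sketch of such a round-robin steal, with assumed names (a next_mmu_context counter and a context_mm[] map from context number to owning mm), not necessarily the identifiers used in this file:

static void steal_context_sketch(void)
{
	struct mm_struct *mm;

	/* reclaim whichever context is next in line, in use or not */
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);	/* drop its cached translations */
	destroy_context(mm);	/* owner picks up a fresh context on next use */
}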