```diff
@@ -18,7 +18,6 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
 #include <linux/memblock.h>
 #include <linux/gfp.h>
 
@@ -46,31 +45,210 @@
 EXPORT_SYMBOL(mm_cachebits);
 #endif
 
+/* Prior to calling these routines, the page should have been flushed
+ * from both the cache and ATC, or the CPU might not notice that the
+ * cache setting for the page has been changed. -jskov
+ */
+static inline void nocache_page(void *vaddr)
+{
+	unsigned long addr = (unsigned long)vaddr;
+
+	if (CPU_IS_040_OR_060) {
+		pte_t *ptep = virt_to_kpte(addr);
+
+		*ptep = pte_mknocache(*ptep);
+	}
+}
+
+static inline void cache_page(void *vaddr)
+{
+	unsigned long addr = (unsigned long)vaddr;
+
+	if (CPU_IS_040_OR_060) {
+		pte_t *ptep = virt_to_kpte(addr);
+
+		*ptep = pte_mkcache(*ptep);
+	}
+}
+
+/*
+ * Motorola 680x0 user's manual recommends using uncached memory for address
+ * translation tables.
+ *
+ * Seeing how the MMU can be external on (some of) these chips, that seems like
+ * a very important recommendation to follow. Provide some helpers to combat
+ * 'variation' amongst the users of this.
+ */
+
+void mmu_page_ctor(void *page)
+{
+	__flush_page_to_ram(page);
+	flush_tlb_kernel_page(page);
+	nocache_page(page);
+}
+
+void mmu_page_dtor(void *page)
+{
+	cache_page(page);
+}
+
```
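These helpers bundle the flush-then-remap sequence the rest of the file used to open-code at every allocation site: a page must be flushed from both the cache and the ATC before its cacheability is changed (per the comment above), so `mmu_page_ctor()` flushes first and then marks the page noncacheable, and `mmu_page_dtor()` makes it cacheable again before the page goes back to the allocator. A minimal sketch of the intended pairing; the `alloc_mmu_page()`/`free_mmu_page()` wrappers are hypothetical, not part of the patch:

```c
/* Hypothetical call pattern for the ctor/dtor pair -- not from the patch. */
static void *alloc_mmu_page(void)
{
	void *page = (void *)get_zeroed_page(GFP_KERNEL);

	if (page)
		mmu_page_ctor(page);	/* flush caches/ATC, mark noncacheable */
	return page;
}

static void free_mmu_page(void *page)
{
	mmu_page_dtor(page);		/* make cacheable again */
	free_page((unsigned long)page);
}
```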
|---|
```diff
+/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
+   struct page instead of separately kmalloced struct. Stolen from
+   arch/sparc/mm/srmmu.c ... */
+
+typedef struct list_head ptable_desc;
+
+static struct list_head ptable_list[2] = {
+	LIST_HEAD_INIT(ptable_list[0]),
+	LIST_HEAD_INIT(ptable_list[1]),
+};
+
+#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
+#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
+#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
+
+static const int ptable_shift[2] = {
+	7+2, /* PGD, PMD */
+	6+2, /* PTE */
+};
+
+#define ptable_size(type) (1U << ptable_shift[type])
+#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
+
```
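The bookkeeping lives entirely in the backing page's `struct page`: `lru` links the page into `ptable_list[]`, and `index` is reused as a bitmap with one bit per table slot (a set bit means the slot is free). Assuming the usual 4 KiB m68k page size (an assumption here, not stated in the patch), the arithmetic works out to 512-byte PGD/PMD tables, eight per page with mask `0xff`, and 256-byte PTE tables, sixteen per page with mask `0xffff`. A standalone check of those numbers:

```c
/* Userspace sketch of the slot arithmetic; assumes PAGE_SIZE == 4096. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

static const int ptable_shift[2] = { 7 + 2, 6 + 2 };	/* PGD/PMD, PTE */

#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)

int main(void)
{
	assert(ptable_size(0) == 512 && ptable_mask(0) == 0xff);
	assert(ptable_size(1) == 256 && ptable_mask(1) == 0xffff);
	printf("PGD/PMD: %u bytes/slot, mask %#x\n", ptable_size(0), ptable_mask(0));
	printf("PTE:     %u bytes/slot, mask %#x\n", ptable_size(1), ptable_mask(1));
	return 0;
}
```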
|---|
```diff
+void __init init_pointer_table(void *table, int type)
+{
+	ptable_desc *dp;
+	unsigned long ptable = (unsigned long)table;
+	unsigned long page = ptable & PAGE_MASK;
+	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));
+
+	dp = PD_PTABLE(page);
+	if (!(PD_MARKBITS(dp) & mask)) {
+		PD_MARKBITS(dp) = ptable_mask(type);
+		list_add(dp, &ptable_list[type]);
+	}
+
+	PD_MARKBITS(dp) &= ~mask;
+	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
+
+	/* unreserve the page so it's possible to free that page */
+	__ClearPageReserved(PD_PAGE(dp));
+	init_page_count(PD_PAGE(dp));
+
+	return;
+}
+
+void *get_pointer_table(int type)
+{
+	ptable_desc *dp = ptable_list[type].next;
+	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
+	unsigned int tmp, off;
+
+	/*
+	 * For a pointer table for a user process address space, a
+	 * table is taken from a page allocated for the purpose. Each
+	 * page can hold 8 pointer tables. The page is remapped in
+	 * virtual address space to be noncacheable.
+	 */
+	if (mask == 0) {
+		void *page;
+		ptable_desc *new;
+
+		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
+			return NULL;
+
+		if (type == TABLE_PTE) {
+			/*
+			 * m68k doesn't have SPLIT_PTE_PTLOCKS for not having
+			 * SMP.
+			 */
+			pgtable_pte_page_ctor(virt_to_page(page));
+		}
+
+		mmu_page_ctor(page);
+
+		new = PD_PTABLE(page);
+		PD_MARKBITS(new) = ptable_mask(type) - 1;
+		list_add_tail(new, dp);
+
+		return (pmd_t *)page;
+	}
+
+	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
+		;
+	PD_MARKBITS(dp) = mask & ~tmp;
+	if (!PD_MARKBITS(dp)) {
+		/* move to end of list */
+		list_move_tail(dp, &ptable_list[type]);
+	}
+	return page_address(PD_PAGE(dp)) + off;
+}
+
```
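Callers get a kmalloc-like interface: request a table of a given type, receive a pointer to a slot inside one of the shared noncacheable pages. (The comment's "8 pointer tables" matches the PGD/PMD case; sixteen 256-byte tables fit in the PTE case.) A hedged sketch of how the pgalloc layer can sit on top; the wrapper names are invented, and `TABLE_PMD` is assumed to exist alongside `TABLE_PTE` in the headers, as the `/* PGD, PMD */` comment on `ptable_shift[0]` suggests:

```c
/* Hypothetical wrappers -- the real ones live in motorola_pgalloc.h. */
static pmd_t *example_pmd_alloc(void)
{
	/* Hands out one 512-byte slot from a shared noncacheable page. */
	return get_pointer_table(TABLE_PMD);
}

static void example_pmd_free(pmd_t *pmd)
{
	/* Marks the slot free; the backing page itself is only freed
	 * once all of its slots have been returned. */
	free_pointer_table(pmd, TABLE_PMD);
}
```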
|---|
```diff
+int free_pointer_table(void *table, int type)
+{
+	ptable_desc *dp;
+	unsigned long ptable = (unsigned long)table;
+	unsigned long page = ptable & PAGE_MASK;
+	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));
+
+	dp = PD_PTABLE(page);
+	if (PD_MARKBITS (dp) & mask)
+		panic ("table already free!");
+
+	PD_MARKBITS (dp) |= mask;
+
+	if (PD_MARKBITS(dp) == ptable_mask(type)) {
+		/* all tables in page are free, free page */
+		list_del(dp);
+		mmu_page_dtor((void *)page);
+		if (type == TABLE_PTE)
+			pgtable_pte_page_dtor(virt_to_page(page));
+		free_page (page);
+		return 1;
+	} else if (ptable_list[type].next != dp) {
+		/*
+		 * move this descriptor to the front of the list, since
+		 * it has one or more free tables.
+		 */
+		list_move(dp, &ptable_list[type]);
+	}
+	return 0;
+}
+
 /* size of memory already mapped in head.S */
 extern __initdata unsigned long m68k_init_mapped_size;
 
 extern unsigned long availmem;
 
+static pte_t *last_pte_table __initdata = NULL;
+
 static pte_t * __init kernel_page_table(void)
 {
-	pte_t *ptablep;
+	pte_t *pte_table = last_pte_table;
 
-	ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+	if (PAGE_ALIGNED(last_pte_table)) {
+		pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+		if (!pte_table) {
+			panic("%s: Failed to allocate %lu bytes align=%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
+		}
 
-	clear_page(ptablep);
-	__flush_page_to_ram(ptablep);
-	flush_tlb_kernel_page(ptablep);
-	nocache_page(ptablep);
+		clear_page(pte_table);
+		mmu_page_ctor(pte_table);
 
-	return ptablep;
+		last_pte_table = pte_table;
+	}
+
+	last_pte_table += PTRS_PER_PTE;
+
+	return pte_table;
 }
```
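`kernel_page_table()` becomes a boot-time bump allocator: `last_pte_table` starts out NULL, `PAGE_ALIGNED(NULL)` is true, so the first call grabs a fresh page from low memory; subsequent calls carve it into `PTRS_PER_PTE`-entry tables until the bump pointer reaches the next page boundary. A userspace model of the same carving scheme (sizes assume 4 KiB pages and 256-byte PTE tables, as above):

```c
/* Userspace model of the bump allocation in kernel_page_table(). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE	4096u
#define TABLE_BYTES	256u	/* 64 PTEs x 4 bytes, assuming m68k sizes */

static char *last_table;	/* NULL at startup, like last_pte_table */

static void *table_alloc(void)
{
	char *table = last_table;

	if ((uintptr_t)last_table % PAGE_SIZE == 0) {	/* NULL, or page used up */
		table = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!table)
			abort();	/* kernel_page_table() panics here */
		memset(table, 0, PAGE_SIZE);
		last_table = table;
	}
	last_table += TABLE_BYTES;
	return table;
}

int main(void)
{
	void *a = table_alloc();
	void *b = table_alloc();

	printf("%p %p (same page, %u bytes apart)\n", a, b, TABLE_BYTES);
	return 0;
}
```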
|---|
```diff
 
-static pmd_t *last_pgtable __initdata = NULL;
-pmd_t *zero_pgtable __initdata = NULL;
+static pmd_t *last_pmd_table __initdata = NULL;
 
 static pmd_t * __init kernel_ptr_table(void)
 {
-	if (!last_pgtable) {
+	if (!last_pmd_table) {
 		unsigned long pmd, last;
 		int i;
 
@@ -80,38 +258,41 @@
 		 */
 		last = (unsigned long)kernel_pg_dir;
 		for (i = 0; i < PTRS_PER_PGD; i++) {
-			if (!pgd_present(kernel_pg_dir[i]))
+			pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);
+
+			if (!pud_present(*pud))
 				continue;
-			pmd = __pgd_page(kernel_pg_dir[i]);
+			pmd = pgd_page_vaddr(kernel_pg_dir[i]);
 			if (pmd > last)
 				last = pmd;
 		}
 
-		last_pgtable = (pmd_t *)last;
+		last_pmd_table = (pmd_t *)last;
 #ifdef DEBUG
-		printk("kernel_ptr_init: %p\n", last_pgtable);
+		printk("kernel_ptr_init: %p\n", last_pmd_table);
 #endif
 	}
 
-	last_pgtable += PTRS_PER_PMD;
-	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
-		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+	last_pmd_table += PTRS_PER_PMD;
+	if (PAGE_ALIGNED(last_pmd_table)) {
+		last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+		if (!last_pmd_table)
+			panic("%s: Failed to allocate %lu bytes align=%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
 
-		clear_page(last_pgtable);
-		__flush_page_to_ram(last_pgtable);
-		flush_tlb_kernel_page(last_pgtable);
-		nocache_page(last_pgtable);
+		clear_page(last_pmd_table);
+		mmu_page_ctor(last_pmd_table);
 	}
 
-	return last_pgtable;
+	return last_pmd_table;
 }
```
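`kernel_ptr_table()` gets the same treatment: the open-coded `((unsigned long)last_pgtable & ~PAGE_MASK) == 0` refill test becomes `PAGE_ALIGNED()`, and `memblock_alloc_low()` replaces `alloc_bootmem_low_pages()`, which used to panic internally; memblock returns NULL on failure, so the panic moves to the caller. The macro, from include/linux/mm.h:

```c
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
```

Since NULL is trivially page-aligned, the very first call through either allocator triggers the initial page allocation with no special-casing.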
|---|
```diff
 
 static void __init map_node(int node)
 {
-#define PTRTREESIZE (256*1024)
-#define ROOTTREESIZE (32*1024*1024)
 	unsigned long physaddr, virtaddr, size;
 	pgd_t *pgd_dir;
+	p4d_t *p4d_dir;
+	pud_t *pud_dir;
 	pmd_t *pmd_dir;
 	pte_t *pte_dir;
 
@@ -125,56 +306,57 @@
 
 	while (size > 0) {
 #ifdef DEBUG
-		if (!(virtaddr & (PTRTREESIZE-1)))
+		if (!(virtaddr & (PMD_SIZE-1)))
 			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
 				virtaddr);
 #endif
 		pgd_dir = pgd_offset_k(virtaddr);
 		if (virtaddr && CPU_IS_020_OR_030) {
-			if (!(virtaddr & (ROOTTREESIZE-1)) &&
-			    size >= ROOTTREESIZE) {
+			if (!(virtaddr & (PGDIR_SIZE-1)) &&
+			    size >= PGDIR_SIZE) {
 #ifdef DEBUG
 				printk ("[very early term]");
 #endif
 				pgd_val(*pgd_dir) = physaddr;
-				size -= ROOTTREESIZE;
-				virtaddr += ROOTTREESIZE;
-				physaddr += ROOTTREESIZE;
+				size -= PGDIR_SIZE;
+				virtaddr += PGDIR_SIZE;
+				physaddr += PGDIR_SIZE;
 				continue;
 			}
 		}
-		if (!pgd_present(*pgd_dir)) {
+		p4d_dir = p4d_offset(pgd_dir, virtaddr);
+		pud_dir = pud_offset(p4d_dir, virtaddr);
+		if (!pud_present(*pud_dir)) {
 			pmd_dir = kernel_ptr_table();
 #ifdef DEBUG
 			printk ("[new pointer %p]", pmd_dir);
 #endif
-			pgd_set(pgd_dir, pmd_dir);
+			pud_set(pud_dir, pmd_dir);
 		} else
-			pmd_dir = pmd_offset(pgd_dir, virtaddr);
+			pmd_dir = pmd_offset(pud_dir, virtaddr);
 
 		if (CPU_IS_020_OR_030) {
 			if (virtaddr) {
 #ifdef DEBUG
 				printk ("[early term]");
 #endif
-				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
-				physaddr += PTRTREESIZE;
+				pmd_val(*pmd_dir) = physaddr;
+				physaddr += PMD_SIZE;
 			} else {
 				int i;
 #ifdef DEBUG
 				printk ("[zero map]");
 #endif
-				zero_pgtable = kernel_ptr_table();
-				pte_dir = (pte_t *)zero_pgtable;
-				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
-					_PAGE_TABLE | _PAGE_ACCESSED;
+				pte_dir = kernel_page_table();
+				pmd_set(pmd_dir, pte_dir);
+
 				pte_val(*pte_dir++) = 0;
 				physaddr += PAGE_SIZE;
-				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
+				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
 					pte_val(*pte_dir++) = physaddr;
 			}
-			size -= PTRTREESIZE;
-			virtaddr += PTRTREESIZE;
+			size -= PMD_SIZE;
+			virtaddr += PMD_SIZE;
 		} else {
 			if (!pmd_present(*pmd_dir)) {
 #ifdef DEBUG
```
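The file-local `PTRTREESIZE`/`ROOTTREESIZE` constants give way to the generic `PMD_SIZE`/`PGDIR_SIZE`, and the raw descriptor pokes (`pmd_dir->pmd[...] = physaddr`) become `pmd_val()`/`pmd_set()` accessors; the `zero_pgtable` special case for the zero mapping now just takes an ordinary table from `kernel_page_table()`. For the renames to be behavior-preserving, the generic macros must equal the deleted values; that equality is implied by the patch rather than shown in it, so treat this check as an inference:

```c
/* Inferred from the rename: the m68k 020/030 early-termination sizes.
 * 256 KiB and 32 MiB are the deleted PTRTREESIZE/ROOTTREESIZE values. */
static_assert(PMD_SIZE == 256 * 1024, "one pointer-table entry maps 256 KiB");
static_assert(PGDIR_SIZE == 32 * 1024 * 1024, "one root-table entry maps 32 MiB");
```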
|---|
```diff
@@ -207,7 +389,7 @@
  */
 void __init paging_init(void)
 {
-	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
 	unsigned long min_addr, max_addr;
 	unsigned long addr;
 	int i;
@@ -228,7 +410,7 @@
 
 	min_addr = m68k_memory[0].addr;
 	max_addr = min_addr + m68k_memory[0].size;
-	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
 	for (i = 1; i < m68k_num_memory;) {
 		if (m68k_memory[i].addr < min_addr) {
 			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
@@ -239,7 +421,7 @@
 				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
 			continue;
 		}
-		memblock_add(m68k_memory[i].addr, m68k_memory[i].size);
+		memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i);
 		addr = m68k_memory[i].addr + m68k_memory[i].size;
 		if (addr > max_addr)
 			max_addr = addr;
```
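Each m68k memory chunk is its own node, so registering chunks with `memblock_add_node()` records the owning node id alongside the physical range; plain `memblock_add()` would have filed everything under node 0. The signature contemporary with this patch (later kernels added a flags argument):

```c
/* From include/linux/memblock.h of this era: */
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
```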
|---|
```diff
@@ -277,7 +459,10 @@
 	 * initialize the bad page table and bad page to point
 	 * to a couple of allocated pages
 	 */
-	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	/*
 	 * Set up SFC/DFC registers
```
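The same bootmem-to-memblock conversion as in the page-table allocators: `alloc_bootmem_pages()` panicked internally on failure, whereas `memblock_alloc()` returns NULL and leaves the decision to the caller, hence the explicit `panic()`. `memblock_alloc()` also returns zeroed memory, which `empty_zero_page` depends on. A sketch of the resulting boot-allocation pattern; the `boot_alloc()` helper is hypothetical, for illustration:

```c
/* Hypothetical helper showing the memblock boot-allocation pattern:
 * memory comes back zeroed, and a NULL return is the caller's problem. */
static void * __init boot_alloc(phys_addr_t size, phys_addr_t align)
{
	void *ptr = memblock_alloc(size, align);

	if (!ptr)
		panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
		      __func__, (u64)size, (u64)align);
	return ptr;
}
```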
|---|
```diff
@@ -287,12 +472,10 @@
 #ifdef DEBUG
 	printk ("before free_area_init\n");
 #endif
-	for (i = 0; i < m68k_num_memory; i++) {
-		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
-		free_area_init_node(i, zones_size,
-				m68k_memory[i].addr >> PAGE_SHIFT, NULL);
+	for (i = 0; i < m68k_num_memory; i++)
 		if (node_present_pages(i))
 			node_set_state(i, N_NORMAL_MEMORY);
-	}
-}
 
+	max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
+	free_area_init(max_zone_pfn);
+}
```
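With zone extents coming from memblock, the per-node `free_area_init_node()` loop collapses into one `free_area_init()` call; as of the v5.8 mm-init rework, that function takes only an array of maximum zone PFNs and derives node and zone ranges from memblock itself. The `node_set_state()` loop survives so each populated chunk is still marked `N_NORMAL_MEMORY`.

```c
/* free_area_init() after the v5.8 mm-init rework (mm/page_alloc.c):
 * one call for all nodes, bounded per zone by max_zone_pfn[]. */
void __init free_area_init(unsigned long *max_zone_pfn);
```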
|---|