@@ -12,8 +12,7 @@
 
 extern const char bad_pmd_string[];
 
-extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
-	unsigned long address)
+extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
 	unsigned long page = __get_free_page(GFP_DMA);
 
@@ -29,55 +28,44 @@
 	return (pmd_t *) pgd;
 }
 
-#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
-#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
+#define pmd_populate(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
 
-#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)
+#define pmd_populate_kernel pmd_populate
 
-#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
-	(unsigned long)(page_address(page)))
+#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)
 
-#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
-
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
 				  unsigned long address)
 {
-	pgtable_page_dtor(page);
+	struct page *page = virt_to_page(pgtable);
+
+	pgtable_pte_page_dtor(page);
 	__free_page(page);
 }
 
-#define __pmd_free_tlb(tlb, pmd, address) do { } while (0)
-
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
-	unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
 	struct page *page = alloc_pages(GFP_DMA, 0);
 	pte_t *pte;
 
 	if (!page)
 		return NULL;
-	if (!pgtable_page_ctor(page)) {
+	if (!pgtable_pte_page_ctor(page)) {
 		__free_page(page);
 		return NULL;
 	}
 
-	pte = kmap(page);
-	if (pte) {
-		clear_page(pte);
-		__flush_page_to_ram(pte);
-		flush_tlb_kernel_page(pte);
-		nocache_page(pte);
-	}
-	kunmap(page);
+	pte = page_address(page);
+	clear_page(pte);
 
-	return page;
+	return pte;
 }
 
-static inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
 {
-	pgtable_page_dtor(page);
+	struct page *page = virt_to_page(pgtable);
+
+	pgtable_pte_page_dtor(page);
 	__free_page(page);
 }
 
@@ -99,11 +87,9 @@
 	new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
 	if (!new_pgd)
 		return NULL;
-	memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
+	memcpy(new_pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t));
 	memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
 	return new_pgd;
 }
-
-#define pgd_populate(mm, pmd, pte) BUG()
 
 #endif /* M68K_MCF_PGALLOC_H */
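
For context: after this change, pte_alloc_one() hands back the page table as a pgtable_t holding a kernel virtual address (a pte_t *) rather than a struct page *, and pte_free()/__pte_free_tlb() convert back with virt_to_page(). The sketch below shows, under that assumption, how the reworked hooks are meant to compose: allocate a table, install it with pmd_populate(), and recover it later via pmd_pgtable(). The helper name and the omitted page-table locking are illustrative only; none of this code is part of the diff above.

/*
 * Illustrative sketch only -- not part of the patch.  The helper name
 * is made up for this example, and page-table locking is omitted.
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/pgalloc.h>

static int mcf_example_install_pte_table(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t table = pte_alloc_one(mm);	/* zeroed PTE page, or NULL */

	if (!table)
		return -ENOMEM;

	if (pmd_none(*pmd))
		pmd_populate(mm, pmd, table);	/* point the pmd at the new table */
	else
		pte_free(mm, table);		/* a table is already installed */

	/* pmd_pgtable(*pmd) now maps the pmd entry back to the pte_t * table. */
	return 0;
}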