@@ ... @@
  * Pages used for the page tables is a different story. FIXME: more
  */

-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <asm/processor.h>
-#include <asm/pgalloc.h>
+void __tlb_remove_table(void *_table);
+static inline void tlb_flush(struct mmu_gather *tlb);
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+					  struct page *page, int page_size);
+
+#define tlb_start_vma(tlb, vma)			do { } while (0)
+#define tlb_end_vma(tlb, vma)			do { } while (0)
+
+#define tlb_flush tlb_flush
+#define pte_free_tlb pte_free_tlb
+#define pmd_free_tlb pmd_free_tlb
+#define p4d_free_tlb p4d_free_tlb
+#define pud_free_tlb pud_free_tlb
+
 #include <asm/tlbflush.h>
-
-struct mmu_gather {
-	struct mm_struct *mm;
-	struct mmu_table_batch *batch;
-	unsigned int fullmm;
-	unsigned long start, end;
-};
-
-struct mmu_table_batch {
-	struct rcu_head rcu;
-	unsigned int nr;
-	void *tables[0];
-};
-
-#define MAX_TABLE_BATCH \
-	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-
-extern void tlb_table_flush(struct mmu_gather *tlb);
-extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-		    unsigned long start, unsigned long end)
-{
-	tlb->mm = mm;
-	tlb->start = start;
-	tlb->end = end;
-	tlb->fullmm = !(start | (end+1));
-	tlb->batch = NULL;
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-	__tlb_flush_mm_lazy(tlb->mm);
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-	tlb_table_flush(tlb);
-}
-
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-	tlb_flush_mmu_tlbonly(tlb);
-	tlb_flush_mmu_free(tlb);
-}
-
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
-		    unsigned long start, unsigned long end, bool force)
-{
-	if (force) {
-		tlb->start = start;
-		tlb->end = end;
-	}
-
-	tlb_flush_mmu(tlb);
-}
+#include <asm-generic/tlb.h>
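The block of self-referential defines above (`#define tlb_flush tlb_flush` and friends) is the conventional opt-out mechanism of <asm-generic/tlb.h>: the generic header only emits its default version of a hook when no macro of the same name is defined. A minimal sketch of the idiom, not the verbatim generic code:

	/* arch header: provide the override, mark it with a same-named macro */
	static inline void tlb_flush(struct mmu_gather *tlb);
	#define tlb_flush tlb_flush

	/* generic header: supply a default only when no override is marked */
	#ifndef tlb_flush
	static inline void tlb_flush(struct mmu_gather *tlb)
	{
		/* generic range/mm flush would go here */
	}
	#endif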

 /*
  * Release the page cache reference for a pte removed by
  * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
  * has already been freed, so just do free_page_and_swap_cache.
  */
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	free_page_and_swap_cache(page);
-	return false; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	free_page_and_swap_cache(page);
-}
-
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
 					  struct page *page, int page_size)
 {
-	return __tlb_remove_page(tlb, page);
+	free_page_and_swap_cache(page);
+	return false;
 }
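Returning false tells the caller that nothing was batched, so no intermediate flush is ever needed; s390 frees each page immediately rather than gathering it. For reference, the generic caller's contract looks roughly like this (simplified from include/asm-generic/tlb.h, not verbatim):

	static inline void tlb_remove_page_size(struct mmu_gather *tlb,
						struct page *page, int page_size)
	{
		if (__tlb_remove_page_size(tlb, page, page_size))
			tlb_flush_mmu(tlb);	/* only when a batch filled up */
	}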

-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
-					struct page *page, int page_size)
+static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	return tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
-				unsigned long address, unsigned long size)
-{
-	/*
-	 * the range might exceed the original range that was provided to
-	 * tlb_gather_mmu(), so we need to update it despite the fact it is
-	 * usually not updated.
-	 */
-	if (tlb->start > address)
-		tlb->start = address;
-	if (tlb->end < address + size)
-		tlb->end = address + size;
+	__tlb_flush_mm_lazy(tlb->mm);
 }

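tlb_flush() maps onto the existing s390 lazy-flush scheme: the table-free helpers below set mm->context.flush_mm, and the flush is only issued once, when that flag is seen. Roughly what the helper in arch/s390/include/asm/tlbflush.h does (abridged, for context):

	static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
	{
		if (mm->context.flush_mm) {
			__tlb_flush_mm(mm);	/* the actual machine flush */
			mm->context.flush_mm = 0;
		}
	}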
 /*
@@ ... @@
  * page table from the tlb.
  */
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
-				unsigned long address)
+				unsigned long address)
 {
+	__tlb_adjust_range(tlb, address, PAGE_SIZE);
+	tlb->mm->context.flush_mm = 1;
+	tlb->freed_tables = 1;
+	tlb->cleared_ptes = 1;
+	/*
+	 * page_table_free_rcu takes care of the allocation bit masks
+	 * of the 2K table fragments in the 4K page table page,
+	 * then calls tlb_remove_table.
+	 */
 	page_table_free_rcu(tlb, (unsigned long *) pte, address);
 }

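Besides handing the fragment to the RCU path, pte_free_tlb() now records everything the generic code needs: the affected range, that a page table (not just a leaf page) was freed, and which level was cleared. __tlb_adjust_range() merely widens the tracked interval; per the helper in include/asm-generic/tlb.h its effect is essentially:

	tlb->start = min(tlb->start, address);
	tlb->end   = max(tlb->end, address + PAGE_SIZE);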
@@ ... @@
 	if (mm_pmd_folded(tlb->mm))
 		return;
 	pgtable_pmd_page_dtor(virt_to_page(pmd));
+	__tlb_adjust_range(tlb, address, PAGE_SIZE);
+	tlb->mm->context.flush_mm = 1;
+	tlb->freed_tables = 1;
+	tlb->cleared_puds = 1;
 	tlb_remove_table(tlb, pmd);
 }

@@ ... @@
 {
 	if (mm_p4d_folded(tlb->mm))
 		return;
+	__tlb_adjust_range(tlb, address, PAGE_SIZE);
+	tlb->mm->context.flush_mm = 1;
+	tlb->freed_tables = 1;
+	tlb->cleared_p4ds = 1;
 	tlb_remove_table(tlb, p4d);
 }

@@ ... @@
 {
 	if (mm_pud_folded(tlb->mm))
 		return;
+	tlb->mm->context.flush_mm = 1;
+	tlb->freed_tables = 1;
+	tlb->cleared_puds = 1;
 	tlb_remove_table(tlb, pud);
 }

-#define tlb_start_vma(tlb, vma)			do { } while (0)
-#define tlb_end_vma(tlb, vma)			do { } while (0)
-#define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
-#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)	do { } while (0)
-#define tlb_migrate_finish(mm)			do { } while (0)
-#define tlb_flush_pmd_range(tlb, addr, sz)	do { } while (0)
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
-	tlb_remove_tlb_entry(tlb, ptep, address)
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-						     unsigned int page_size)
-{
-}

 #endif /* _S390_TLB_H */
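With this conversion the driving sequence lives entirely in common code (mm/mmu_gather.c). A caller-side sketch of how the hooks above get exercised during an unmap, using the mmu_gather API of this kernel generation (function names and exact signatures are illustrative, not copied from the tree):

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);		/* begin gathering          */
	unmap_page_range(&tlb, vma, start, end, NULL);	/* __tlb_remove_page_size() */
	free_pgtables(&tlb, vma, start, end);		/* pte/pmd/p4d/pud_free_tlb */
	tlb_finish_mmu(&tlb, start, end);		/* tlb_flush() + table RCU  */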