@@ -2,174 +2,10 @@
 #ifndef __UM_TLB_H
 #define __UM_TLB_H
 
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <asm/percpu.h>
-#include <asm/pgalloc.h>
+#include <linux/mm.h>
+
 #include <asm/tlbflush.h>
-
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
-/* struct mmu_gather is an opaque type used by the mm code for passing around
- * any data needed by arch specific code for tlb_remove_page.
- */
-struct mmu_gather {
-        struct mm_struct *mm;
-        unsigned int need_flush; /* Really unmapped some ptes? */
-        unsigned long start;
-        unsigned long end;
-        unsigned int fullmm; /* non-zero means full mm flush */
-};
-
-static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
-                                          unsigned long address)
-{
-        if (tlb->start > address)
-                tlb->start = address;
-        if (tlb->end < address + PAGE_SIZE)
-                tlb->end = address + PAGE_SIZE;
-}
-
-static inline void init_tlb_gather(struct mmu_gather *tlb)
-{
-        tlb->need_flush = 0;
-
-        tlb->start = TASK_SIZE;
-        tlb->end = 0;
-
-        if (tlb->fullmm) {
-                tlb->start = 0;
-                tlb->end = TASK_SIZE;
-        }
-}
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-                    unsigned long start, unsigned long end)
-{
-        tlb->mm = mm;
-        tlb->start = start;
-        tlb->end = end;
-        tlb->fullmm = !(start | (end+1));
-
-        init_tlb_gather(tlb);
-}
-
-extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-                               unsigned long end);
-
-static inline void
-tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-        flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
-}
-
-static inline void
-tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-        init_tlb_gather(tlb);
-}
-
-static inline void
-tlb_flush_mmu(struct mmu_gather *tlb)
-{
-        if (!tlb->need_flush)
-                return;
-
-        tlb_flush_mmu_tlbonly(tlb);
-        tlb_flush_mmu_free(tlb);
-}
-
-/* arch_tlb_finish_mmu
- * Called at the end of the shootdown operation to free up any resources
- * that were required.
- */
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
-                    unsigned long start, unsigned long end, bool force)
-{
-        if (force) {
-                tlb->start = start;
-                tlb->end = end;
-                tlb->need_flush = 1;
-        }
-        tlb_flush_mmu(tlb);
-
-        /* keep the page table cache within bounds */
-        check_pgt_cache();
-}
-
-/* tlb_remove_page
- * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
- * while handling the additional races in SMP caused by other CPUs
- * caching valid mappings in their TLBs.
- */
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-        tlb->need_flush = 1;
-        free_page_and_swap_cache(page);
-        return false; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-        __tlb_remove_page(tlb, page);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-                                          struct page *page, int page_size)
-{
-        return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
-                                        struct page *page, int page_size)
-{
-        return tlb_remove_page(tlb, page);
-}
-
-static inline void
-tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
-                    unsigned long size)
-{
-        tlb->need_flush = 1;
-
-        if (tlb->start > address)
-                tlb->start = address;
-        if (tlb->end < address + size)
-                tlb->end = address + size;
-}
-
-/**
- * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
- *
- * Record the fact that pte's were really umapped in ->need_flush, so we can
- * later optimise away the tlb invalidate. This helps when userspace is
- * unmapping already-unmapped pages, which happens quite a lot.
- */
-#define tlb_remove_tlb_entry(tlb, ptep, address)                \
-        do {                                                    \
-                tlb->need_flush = 1;                            \
-                __tlb_remove_tlb_entry(tlb, ptep, address);     \
-        } while (0)
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)        \
-        tlb_remove_tlb_entry(tlb, ptep, address)
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-                                                     unsigned int page_size)
-{
-}
-
-#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
-
-#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
-
-#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
-
-#define tlb_migrate_finish(mm) do {} while (0)
+#include <asm-generic/cacheflush.h>
+#include <asm-generic/tlb.h>
 
 #endif
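For context, a minimal sketch of how core mm code drives the mmu_gather API that this header now inherits from <asm-generic/tlb.h> instead of open-coding. tlb_gather_mmu(), unmap_vmas() and tlb_finish_mmu() are the real in-tree interfaces of this kernel generation (later kernels dropped the start/end arguments from tlb_gather_mmu()/tlb_finish_mmu()); unmap_example() itself is a hypothetical caller invented for illustration:

/*
 * Hypothetical caller, modelled on the exit_mmap()/zap paths.
 * Assumes the v5.x-era signatures that take a start/end range;
 * unmap_example() is not a real kernel function.
 */
#include <linux/mm.h>
#include <asm/tlb.h>

static void unmap_example(struct vm_area_struct *vma,
                          unsigned long start, unsigned long end)
{
        struct mmu_gather tlb;

        /* Set up batching state for [start, end) of this address space. */
        tlb_gather_mmu(&tlb, vma->vm_mm, start, end);

        /* Clear PTEs; freed pages and TLB ranges accumulate in 'tlb'. */
        unmap_vmas(&tlb, vma, start, end);

        /* Invalidate the gathered range, then free the batched pages. */
        tlb_finish_mmu(&tlb, start, end);
}

The design point of the conversion: rather than each architecture carrying a private struct mmu_gather like the one deleted above, <asm-generic/tlb.h> supplies the shared batching and range-tracking machinery, and the architecture only plugs in its TLB-invalidation primitives (for UML, via <asm/tlbflush.h>).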