2024-09-20 cf4ce59b3b70238352c7f1729f0f7223214828ad
kernel/arch/s390/include/asm/tlb.h
@@ -22,112 +22,38 @@
  * Pages used for the page tables is a different story. FIXME: more
  */

-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <asm/processor.h>
-#include <asm/pgalloc.h>
+void __tlb_remove_table(void *_table);
+static inline void tlb_flush(struct mmu_gather *tlb);
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+                                          struct page *page, int page_size);
+
+#define tlb_start_vma(tlb, vma)           do { } while (0)
+#define tlb_end_vma(tlb, vma)             do { } while (0)
+
+#define tlb_flush tlb_flush
+#define pte_free_tlb pte_free_tlb
+#define pmd_free_tlb pmd_free_tlb
+#define p4d_free_tlb p4d_free_tlb
+#define pud_free_tlb pud_free_tlb
+
 #include <asm/tlbflush.h>
-
-struct mmu_gather {
-        struct mm_struct *mm;
-        struct mmu_table_batch *batch;
-        unsigned int fullmm;
-        unsigned long start, end;
-};
-
-struct mmu_table_batch {
-        struct rcu_head rcu;
-        unsigned int nr;
-        void *tables[0];
-};
-
-#define MAX_TABLE_BATCH \
-        ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-
-extern void tlb_table_flush(struct mmu_gather *tlb);
-extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-                    unsigned long start, unsigned long end)
-{
-        tlb->mm = mm;
-        tlb->start = start;
-        tlb->end = end;
-        tlb->fullmm = !(start | (end+1));
-        tlb->batch = NULL;
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-        __tlb_flush_mm_lazy(tlb->mm);
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-        tlb_table_flush(tlb);
-}
-
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-        tlb_flush_mmu_tlbonly(tlb);
-        tlb_flush_mmu_free(tlb);
-}
-
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
-                    unsigned long start, unsigned long end, bool force)
-{
-        if (force) {
-                tlb->start = start;
-                tlb->end = end;
-        }
-
-        tlb_flush_mmu(tlb);
-}
+#include <asm-generic/tlb.h>

 /*
  * Release the page cache reference for a pte removed by
  * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
  * has already been freed, so just do free_page_and_swap_cache.
  */
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-        free_page_and_swap_cache(page);
-        return false; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-        free_page_and_swap_cache(page);
-}
-
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
                                           struct page *page, int page_size)
 {
-        return __tlb_remove_page(tlb, page);
+        free_page_and_swap_cache(page);
+        return false;
 }

-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
-                                        struct page *page, int page_size)
+static inline void tlb_flush(struct mmu_gather *tlb)
 {
-        return tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
-                                       unsigned long address, unsigned long size)
-{
-        /*
-         * the range might exceed the original range that was provided to
-         * tlb_gather_mmu(), so we need to update it despite the fact it is
-         * usually not updated.
-         */
-        if (tlb->start > address)
-                tlb->start = address;
-        if (tlb->end < address + size)
-                tlb->end = address + size;
+        __tlb_flush_mm_lazy(tlb->mm);
 }

 /*
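
Note: this hunk swaps the s390-private mmu_gather implementation for the generic one. The header now only forward-declares its hooks, defines each hook macro to itself (tlb_flush, pte_free_tlb, ...), and then includes <asm-generic/tlb.h>; the generic header supplies a default only for hooks the architecture left undefined. A minimal standalone sketch of that override convention, using a hypothetical my_hook that is not from this patch:

/*
 * Sketch of the asm-generic override convention: the generic header
 * provides a default implementation only if the architecture did not
 * #define the hook to itself first. Names here are illustrative.
 */
#include <stdio.h>

/* "arch" header: claim the hook before the generic part is seen */
static inline void my_hook(void) { printf("arch-specific hook\n"); }
#define my_hook my_hook

/* "generic" header: fall back only when the arch stayed silent */
#ifndef my_hook
static inline void my_hook(void) { printf("generic default\n"); }
#endif

int main(void)
{
        my_hook();      /* prints "arch-specific hook" */
        return 0;
}
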
@@ -135,8 +61,17 @@
  * page table from the tlb.
  */
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
-                                unsigned long address)
+                                unsigned long address)
 {
+        __tlb_adjust_range(tlb, address, PAGE_SIZE);
+        tlb->mm->context.flush_mm = 1;
+        tlb->freed_tables = 1;
+        tlb->cleared_ptes = 1;
+        /*
+         * page_table_free_rcu takes care of the allocation bit masks
+         * of the 2K table fragments in the 4K page table page,
+         * then calls tlb_remove_table.
+         */
         page_table_free_rcu(tlb, (unsigned long *) pte, address);
 }

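
Note: pte_free_tlb() now widens the gather range by one page via the generic __tlb_adjust_range() and raises flush_mm/freed_tables/cleared_ptes so the generic flush logic knows a page table went away. A small userspace model of the range-growing bookkeeping, mirroring the min/max logic of the removed tlb_flush_pmd_range(); this is a sketch, not kernel code:

#include <stdbool.h>
#include <stdio.h>

struct gather {
        unsigned long start, end;
        bool freed_tables, cleared_ptes;
};

static void adjust_range(struct gather *g, unsigned long addr,
                         unsigned long size)
{
        if (g->start > addr)
                g->start = addr;
        if (g->end < addr + size)
                g->end = addr + size;
}

int main(void)
{
        struct gather g = { .start = ~0UL, .end = 0 };

        adjust_range(&g, 0x2000, 0x1000);       /* a PTE page at 0x2000 */
        g.freed_tables = g.cleared_ptes = true;
        adjust_range(&g, 0x1000, 0x1000);       /* an earlier page widens start */
        printf("flush [%#lx, %#lx), freed_tables=%d\n",
               g.start, g.end, g.freed_tables);
        return 0;
}
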
@@ -153,6 +88,10 @@
         if (mm_pmd_folded(tlb->mm))
                 return;
         pgtable_pmd_page_dtor(virt_to_page(pmd));
+        __tlb_adjust_range(tlb, address, PAGE_SIZE);
+        tlb->mm->context.flush_mm = 1;
+        tlb->freed_tables = 1;
+        tlb->cleared_puds = 1;
         tlb_remove_table(tlb, pmd);
 }

@@ -168,6 +107,10 @@
 {
         if (mm_p4d_folded(tlb->mm))
                 return;
+        __tlb_adjust_range(tlb, address, PAGE_SIZE);
+        tlb->mm->context.flush_mm = 1;
+        tlb->freed_tables = 1;
+        tlb->cleared_p4ds = 1;
         tlb_remove_table(tlb, p4d);
 }

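
Note: pmd_free_tlb() and p4d_free_tlb() follow the same flag-setting pattern but bail out early when the level is folded: s390 grows its page-table hierarchy on demand, and a folded level has no separate table to free. A hypothetical userspace model of that early return (the level threshold and names are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Pretend a 3-level configuration folds the p4d level away. */
struct mm_model { int levels; };

static bool p4d_folded(const struct mm_model *mm) { return mm->levels < 5; }

static void p4d_free_model(const struct mm_model *mm)
{
        if (p4d_folded(mm))
                return;         /* folded: no separate table to queue */
        printf("queue p4d table for deferred freeing\n");
}

int main(void)
{
        struct mm_model mm3 = { .levels = 3 }, mm5 = { .levels = 5 };

        p4d_free_model(&mm3);   /* folded, nothing to do */
        p4d_free_model(&mm5);   /* real table, gets queued */
        return 0;
}
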
@@ -183,23 +126,11 @@
 {
         if (mm_pud_folded(tlb->mm))
                 return;
+        tlb->mm->context.flush_mm = 1;
+        tlb->freed_tables = 1;
+        tlb->cleared_puds = 1;
         tlb_remove_table(tlb, pud);
 }

-#define tlb_start_vma(tlb, vma)                   do { } while (0)
-#define tlb_end_vma(tlb, vma)                     do { } while (0)
-#define tlb_remove_tlb_entry(tlb, ptep, addr)     do { } while (0)
-#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr) do { } while (0)
-#define tlb_migrate_finish(mm)                    do { } while (0)
-#define tlb_flush_pmd_range(tlb, addr, sz)        do { } while (0)
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
-        tlb_remove_tlb_entry(tlb, ptep, address)
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-                                                     unsigned int page_size)
-{
-}

 #endif /* _S390_TLB_H */
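
Note: with arch_tlb_gather_mmu()/arch_tlb_finish_mmu() removed, the gather/flush lifecycle is driven entirely by the generic mmu_gather code. A rough userspace model of that lifecycle, reusing the full-mm test from the removed code; the printf stands in for __tlb_flush_mm_lazy() and none of this is kernel code:

#include <stdio.h>

struct gather { unsigned long start, end; int fullmm; };

static void gather_mmu(struct gather *g, unsigned long start,
                       unsigned long end)
{
        g->start = start;
        g->end = end;
        g->fullmm = !(start | (end + 1));       /* same test the old code used */
}

static void finish_mmu(const struct gather *g)
{
        printf("flush %s range [%#lx, %#lx)\n",
               g->fullmm ? "full-mm" : "partial", g->start, g->end);
}

int main(void)
{
        struct gather g;

        gather_mmu(&g, 0, ~0UL);        /* full address-space teardown */
        finish_mmu(&g);
        return 0;
}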