2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/arch/arm/include/asm/tlb.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  *  arch/arm/include/asm/tlb.h
  *
  *  Copyright (C) 2002 Russell King
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2 as
- *  published by the Free Software Foundation.
  *
  *  Experimentation shows that on a StrongARM, it appears to be faster
  *  to use the "invalidate whole tlb" rather than "invalidate single
@@ -30,282 +27,41 @@
 #else /* !CONFIG_MMU */

 #include <linux/swap.h>
-#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>

-#define MMU_GATHER_BUNDLE	8
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 static inline void __tlb_remove_table(void *_table)
 {
 	free_page_and_swap_cache((struct page *)_table);
 }

-struct mmu_table_batch {
-	struct rcu_head		rcu;
-	unsigned int		nr;
-	void			*tables[0];
-};
-
-#define MAX_TABLE_BATCH		\
-	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-
-extern void tlb_table_flush(struct mmu_gather *tlb);
-extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-
-#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
-#else
-#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
-
-/*
- * TLB handling.  This allows us to remove pages from the page
- * tables, and efficiently handle the TLB issues.
- */
-struct mmu_gather {
-	struct mm_struct	*mm;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	struct mmu_table_batch	*batch;
-	unsigned int		need_flush;
-#endif
-	unsigned int		fullmm;
-	struct vm_area_struct	*vma;
-	unsigned long		start, end;
-	unsigned long		range_start;
-	unsigned long		range_end;
-	unsigned int		nr;
-	unsigned int		max;
-	struct page		**pages;
-	struct page		*local[MMU_GATHER_BUNDLE];
-};
-
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-/*
- * This is unnecessarily complex.  There's three ways the TLB shootdown
- * code is used:
- *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
- *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
- *     tlb->vma will be non-NULL.
- *  2. Unmapping all vmas.  See exit_mmap().
- *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
- *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
- *  3. Unmapping argument pages.  See shift_arg_pages().
- *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
- *     tlb->vma will be NULL.
- */
-static inline void tlb_flush(struct mmu_gather *tlb)
-{
-	if (tlb->fullmm || !tlb->vma)
-		flush_tlb_mm(tlb->mm);
-	else if (tlb->range_end > 0) {
-		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
-		tlb->range_start = TASK_SIZE;
-		tlb->range_end = 0;
-	}
-}
-
-static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
-{
-	if (!tlb->fullmm) {
-		if (addr < tlb->range_start)
-			tlb->range_start = addr;
-		if (addr + PAGE_SIZE > tlb->range_end)
-			tlb->range_end = addr + PAGE_SIZE;
-	}
-}
-
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
-	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-
-	if (addr) {
-		tlb->pages = (void *)addr;
-		tlb->max = PAGE_SIZE / sizeof(struct page *);
-	}
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-	tlb_flush(tlb);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb_table_flush(tlb);
-#endif
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-	free_pages_and_swap_cache(tlb->pages, tlb->nr);
-	tlb->nr = 0;
-	if (tlb->pages == tlb->local)
-		__tlb_alloc_page(tlb);
-}
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-	tlb_flush_mmu_tlbonly(tlb);
-	tlb_flush_mmu_free(tlb);
-}
+#include <asm-generic/tlb.h>

 static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-			unsigned long start, unsigned long end)
+__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
 {
-	tlb->mm = mm;
-	tlb->fullmm = !(start | (end+1));
-	tlb->start = start;
-	tlb->end = end;
-	tlb->vma = NULL;
-	tlb->max = ARRAY_SIZE(tlb->local);
-	tlb->pages = tlb->local;
-	tlb->nr = 0;
-	__tlb_alloc_page(tlb);
+	pgtable_pte_page_dtor(pte);

-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb->batch = NULL;
-#endif
-}
-
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
-			unsigned long start, unsigned long end, bool force)
-{
-	if (force) {
-		tlb->range_start = start;
-		tlb->range_end = end;
-	}
-
-	tlb_flush_mmu(tlb);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	if (tlb->pages != tlb->local)
-		free_pages((unsigned long)tlb->pages, 0);
-}
-
-/*
- * Memorize the range for the TLB flush.
- */
-static inline void
-tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
-{
-	tlb_add_flush(tlb, addr);
-}
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
-	tlb_remove_tlb_entry(tlb, ptep, address)
-/*
- * In the case of tlb vma handling, we can optimise these away in the
- * case where we're doing a full MM flush.  When we're doing a munmap,
- * the vmas are adjusted to only cover the region to be torn down.
- */
-static inline void
-tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
-	if (!tlb->fullmm) {
-		flush_cache_range(vma, vma->vm_start, vma->vm_end);
-		tlb->vma = vma;
-		tlb->range_start = TASK_SIZE;
-		tlb->range_end = 0;
-	}
-}
-
-static inline void
-tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
-	if (!tlb->fullmm)
-		tlb_flush(tlb);
-}
-
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	tlb->pages[tlb->nr++] = page;
-	VM_WARN_ON(tlb->nr > tlb->max);
-	if (tlb->nr == tlb->max)
-		return true;
-	return false;
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	if (__tlb_remove_page(tlb, page))
-		tlb_flush_mmu(tlb);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-					  struct page *page, int page_size)
-{
-	return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
-					struct page *page, int page_size)
-{
-	return tlb_remove_page(tlb, page);
-}
-
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
-	unsigned long addr)
-{
-	pgtable_page_dtor(pte);
-
-#ifdef CONFIG_ARM_LPAE
-	tlb_add_flush(tlb, addr);
-#else
+#ifndef CONFIG_ARM_LPAE
 	/*
 	 * With the classic ARM MMU, a pte page has two corresponding pmd
 	 * entries, each covering 1MB.
 	 */
-	addr &= PMD_MASK;
-	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
-	tlb_add_flush(tlb, addr + SZ_1M);
+	addr = (addr & PMD_MASK) + SZ_1M;
+	__tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
 #endif

-	tlb_remove_entry(tlb, pte);
+	tlb_remove_table(tlb, pte);
 }

-static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
-				  unsigned long addr)
+static inline void
+__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
 {
 #ifdef CONFIG_ARM_LPAE
-	tlb_add_flush(tlb, addr);
-	tlb_remove_entry(tlb, virt_to_page(pmdp));
+	struct page *page = virt_to_page(pmdp);
+
+	pgtable_pmd_page_dtor(page);
+	tlb_remove_table(tlb, page);
 #endif
-}
-
-static inline void
-tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
-{
-	tlb_add_flush(tlb, addr);
-}
-
-static inline void
-tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
-		    unsigned long size)
-{
-	tlb_add_flush(tlb, address);
-	tlb_add_flush(tlb, address + size - PMD_SIZE);
-}
-
-#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
-#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
-#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
-
-#define tlb_migrate_finish(mm)		do { } while (0)
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-						     unsigned int page_size)
-{
-}
-
-static inline void tlb_flush_remove_tables(struct mm_struct *mm)
-{
-}
-
-static inline void tlb_flush_remove_tables_local(void *arg)
-{
 }

 #endif /* CONFIG_MMU */
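
A note on the non-LPAE branch of __pte_free_tlb() in the hunk above: with the classic two-level ARM MMU, Linux folds two 1 MiB hardware sections into a single 2 MiB pmd, so freeing a pte page has to widen the pending TLB flush until it covers both section entries. The standalone sketch below (plain userspace C, not kernel code) replays the patched arithmetic; PAGE_SIZE, SZ_1M and PMD_MASK are illustrative assumptions for a 4 KiB-page configuration, and adjust_range() is a hypothetical stand-in for the generic __tlb_adjust_range() helper that the patch calls.

/* Sketch of the classic-MMU (non-LPAE) range widening in __pte_free_tlb(). */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	0x1000UL		/* assumed: 4 KiB pages */
#define SZ_1M		0x100000UL
#define PMD_SIZE	(2 * SZ_1M)		/* one pmd = two 1 MiB sections */
#define PMD_MASK	(~(PMD_SIZE - 1))

struct gather {					/* just the range fields of mmu_gather */
	unsigned long start, end;
};

/*
 * Hypothetical stand-in for __tlb_adjust_range(): grow [start, end) until it
 * also covers [addr, addr + size).
 */
static void adjust_range(struct gather *tlb, unsigned long addr,
			 unsigned long size)
{
	if (addr < tlb->start)
		tlb->start = addr;
	if (addr + size > tlb->end)
		tlb->end = addr + size;
}

int main(void)
{
	struct gather tlb = { .start = ~0UL, .end = 0 };
	unsigned long addr = 0x12345678;	/* anywhere in the pte page's 2 MiB span */

	/*
	 * The two patched lines: step to the start of the second 1 MiB
	 * section, then cover one page on each side of that boundary.
	 */
	addr = (addr & PMD_MASK) + SZ_1M;
	adjust_range(&tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);

	/*
	 * The range now holds the last page of section 0 and the first page
	 * of section 1, so a flush of [start, end) touches both sections.
	 */
	printf("flush [%#lx, %#lx)\n", tlb.start, tlb.end);
	assert(tlb.start == 0x122ff000UL && tlb.end == 0x12301000UL);
	return 0;
}

Flushing one page on each side of the section boundary is exactly what the two removed tlb_add_flush() calls achieved; expressing it as a single range adjustment is what lets the file drop its private mmu_gather machinery and ride on <asm-generic/tlb.h> instead.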