2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/include/asm-generic/tlb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /* include/asm-generic/tlb.h
  *
  * Generic TLB shootdown code
@@ -6,51 +7,176 @@
  * Based on code from mm/memory.c Copyright Linus Torvalds and others.
  *
  * Copyright 2011 Red Hat, Inc., Peter Zijlstra
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 #ifndef _ASM_GENERIC__TLB_H
 #define _ASM_GENERIC__TLB_H
 
 #include <linux/mmu_notifier.h>
 #include <linux/swap.h>
-#include <asm/pgalloc.h>
+#include <linux/hugetlb_inline.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 /*
- * Semi RCU freeing of the page directories.
- *
- * This is needed by some architectures to implement software pagetable walkers.
- *
- * gup_fast() and other software pagetable walkers do a lockless page-table
- * walk and therefore needs some synchronization with the freeing of the page
- * directories. The chosen means to accomplish that is by disabling IRQs over
- * the walk.
- *
- * Architectures that use IPIs to flush TLBs will then automagically DTRT,
- * since we unlink the page, flush TLBs, free the page. Since the disabling of
- * IRQs delays the completion of the TLB flush we can never observe an already
- * freed page.
- *
- * Architectures that do not have this (PPC) need to delay the freeing by some
- * other means, this is that means.
- *
- * What we do is batch the freed directory pages (tables) and RCU free them.
- * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
- * holds off grace periods.
- *
- * However, in order to batch these pages we need to allocate storage, this
- * allocation is deep inside the MM code and can thus easily fail on memory
- * pressure. To guarantee progress we fall back to single table freeing, see
- * the implementation of tlb_remove_table_one().
- *
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or switching
+ * the loaded mm.
  */
+#ifndef nmi_uaccess_okay
+# define nmi_uaccess_okay() true
+#endif
+
+#ifdef CONFIG_MMU
+
+/*
+ * Generic MMU-gather implementation.
+ *
+ * The mmu_gather data structure is used by the mm code to implement the
+ * correct and efficient ordering of freeing pages and TLB invalidations.
+ *
+ * This correct ordering is:
+ *
+ *  1) unhook page
+ *  2) TLB invalidate page
+ *  3) free page
+ *
+ * That is, we must never free a page before we have ensured there are no live
+ * translations left to it. Otherwise it might be possible to observe (or
+ * worse, change) the page content after it has been reused.
+ *
+ * The mmu_gather API consists of:
+ *
+ *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
+ *
+ *    Finish in particular will issue a (final) TLB invalidate and free
+ *    all (remaining) queued pages.
+ *
+ *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
+ *
+ *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
+ *    there are large holes between the VMAs.
+ *
+ *  - tlb_remove_table()
+ *
+ *    tlb_remove_table() is the basic primitive to free page-table directories
+ *    (__p*_free_tlb()). In its most primitive form it is an alias for
+ *    tlb_remove_page() below, for when page directories are pages and have no
+ *    additional constraints.
+ *
+ *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
+ *
+ *  - tlb_remove_page() / __tlb_remove_page()
+ *  - tlb_remove_page_size() / __tlb_remove_page_size()
+ *
+ *    __tlb_remove_page_size() is the basic primitive that queues a page for
+ *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
+ *    boolean indicating if the queue is (now) full and a call to
+ *    tlb_flush_mmu() is required.
+ *
+ *    tlb_remove_page() and tlb_remove_page_size() imply the call to
+ *    tlb_flush_mmu() when required and have no return value.
+ *
+ *  - tlb_change_page_size()
+ *
+ *    call before __tlb_remove_page*() to set the current page-size; implies a
+ *    possible tlb_flush_mmu() call.
+ *
+ *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
+ *
+ *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
+ *                              related state, like the range)
+ *
+ *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
+ *                      whatever pages are still batched.
+ *
+ *  - mmu_gather::fullmm
+ *
+ *    A flag set by tlb_gather_mmu() to indicate we're going to free
+ *    the entire mm; this allows a number of optimizations.
+ *
+ *    - We can ignore tlb_{start,end}_vma(); because we don't
+ *      care about ranges. Everything will be shot down.
+ *
+ *    - (RISC) architectures that use ASIDs can cycle to a new ASID
+ *      and delay the invalidation until ASID space runs out.
+ *
+ *  - mmu_gather::need_flush_all
+ *
+ *    A flag that can be set by the arch code if it wants to force
+ *    flush the entire TLB irrespective of the range. For instance
+ *    x86-PAE needs this when changing top-level entries.
+ *
+ * And allows the architecture to provide and implement tlb_flush():
+ *
+ * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
+ * use of:
+ *
+ *  - mmu_gather::start / mmu_gather::end
+ *
+ *    which provides the range that needs to be flushed to cover the pages to
+ *    be freed.
+ *
+ *  - mmu_gather::freed_tables
+ *
+ *    set when we freed page table pages
+ *
+ *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
+ *
+ *    returns the smallest TLB entry size unmapped in this range.
+ *
+ * If an architecture does not provide tlb_flush() a default implementation
+ * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
+ * specified, in which case we'll default to flush_tlb_mm().
+ *
+ * Additionally there are a few opt-in features:
+ *
+ *  MMU_GATHER_PAGE_SIZE
+ *
+ *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
+ *  changes the size and provides mmu_gather::page_size to tlb_flush().
+ *
+ *  This might be useful if your architecture has size specific TLB
+ *  invalidation instructions.
+ *
+ *  MMU_GATHER_TABLE_FREE
+ *
+ *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
+ *  for page directories (__p*_free_tlb()).
+ *
+ *  Useful if your architecture has non-page page directories.
+ *
+ *  When used, an architecture is expected to provide __tlb_remove_table()
+ *  which does the actual freeing of these pages.
+ *
+ *  MMU_GATHER_RCU_TABLE_FREE
+ *
+ *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
+ *  comment below).
+ *
+ *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
+ *  and therefore doesn't naturally serialize with software page-table walkers.
+ *
+ *  MMU_GATHER_NO_RANGE
+ *
+ *  Use this if your architecture lacks an efficient flush_tlb_range().
+ *
+ *  MMU_GATHER_NO_GATHER
+ *
+ *  If the option is set the mmu_gather will not track individual pages for
+ *  delayed page free anymore. A platform that enables the option needs to
+ *  provide its own implementation of the __tlb_remove_page_size() function to
+ *  free pages.
+ *
+ *  This is useful if your architecture already flushes TLB entries in the
+ *  various ptep_get_and_clear() functions.
+ */
+
+#ifdef CONFIG_MMU_GATHER_TABLE_FREE
+
 struct mmu_table_batch {
+#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
         struct rcu_head rcu;
+#endif
         unsigned int nr;
         void *tables[0];
 };
@@ -58,11 +184,41 @@
 #define MAX_TABLE_BATCH \
         ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
 
-extern void tlb_table_flush(struct mmu_gather *tlb);
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
+#else /* !CONFIG_MMU_GATHER_TABLE_FREE */
+
+/*
+ * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
+ * page directories and we can use the normal page batching to free them.
+ */
+#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
+
+#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
+
+#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
+/*
+ * This allows an architecture that does not use the linux page-tables for
+ * hardware to skip the TLBI when freeing page tables.
+ */
+#ifndef tlb_needs_table_invalidate
+#define tlb_needs_table_invalidate() (true)
 #endif
 
+void tlb_remove_table_sync_one(void);
+
+#else
+
+#ifdef tlb_needs_table_invalidate
+#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
+#endif
+
+static inline void tlb_remove_table_sync_one(void) { }
+
+#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
+
+
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
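
Annotation (not part of the patch): with CONFIG_MMU_GATHER_TABLE_FREE the architecture supplies __tlb_remove_table() and routes its __p*_free_tlb() hooks through tlb_remove_table(); under MMU_GATHER_RCU_TABLE_FREE it may also override tlb_needs_table_invalidate(). A hypothetical sketch, assuming the queued cookie is the table's struct page and omitting the ctor/dtor accounting a real architecture would do:

/* arch/<foo>/include/asm/tlb.h (hypothetical) */
static inline void __tlb_remove_table(void *table)
{
        /* the cookie is whatever the arch queued; here: the table's struct page */
        free_page_and_swap_cache((struct page *)table);
}

/* arch/<foo>/include/asm/pgalloc.h (hypothetical) */
#define __pte_free_tlb(tlb, pte, address)       tlb_remove_table((tlb), (pte))

/*
 * Under MMU_GATHER_RCU_TABLE_FREE, an architecture whose hardware does not
 * walk the Linux page tables could additionally provide, e.g.:
 *
 *      #define tlb_needs_table_invalidate()    arch_hw_walks_linux_tables()
 *
 * where arch_hw_walks_linux_tables() is a made-up predicate.
 */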
@@ -87,41 +243,68 @@
  */
 #define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
 
-/* struct mmu_gather is an opaque type used by the mm code for passing around
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+                                   int page_size);
+#endif
+
+/*
+ * struct mmu_gather is an opaque type used by the mm code for passing around
  * any data needed by arch specific code for tlb_remove_page.
  */
 struct mmu_gather {
         struct mm_struct *mm;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+#ifdef CONFIG_MMU_GATHER_TABLE_FREE
         struct mmu_table_batch *batch;
 #endif
+
         unsigned long start;
         unsigned long end;
-        /* we are in the middle of an operation to clear
-         * a full mm and can make some optimizations */
-        unsigned int fullmm : 1,
-        /* we have performed an operation which
-         * requires a complete flush of the tlb */
-                need_flush_all : 1;
+        /*
+         * we are in the middle of an operation to clear
+         * a full mm and can make some optimizations
+         */
+        unsigned int fullmm : 1;
 
+        /*
+         * we have performed an operation which
+         * requires a complete flush of the tlb
+         */
+        unsigned int need_flush_all : 1;
+
+        /*
+         * we have removed page directories
+         */
+        unsigned int freed_tables : 1;
+
+        /*
+         * at which levels have we cleared entries?
+         */
+        unsigned int cleared_ptes : 1;
+        unsigned int cleared_pmds : 1;
+        unsigned int cleared_puds : 1;
+        unsigned int cleared_p4ds : 1;
+
+        /*
+         * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
+         */
+        unsigned int vma_exec : 1;
+        unsigned int vma_huge : 1;
+
+        unsigned int batch_count;
+
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
         struct mmu_gather_batch *active;
         struct mmu_gather_batch local;
         struct page *__pages[MMU_GATHER_BUNDLE];
-        unsigned int batch_count;
-        int page_size;
+
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
+        unsigned int page_size;
+#endif
+#endif
 };
 
-#define HAVE_GENERIC_MMU_GATHER
-
-void arch_tlb_gather_mmu(struct mmu_gather *tlb,
-        struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
-void arch_tlb_finish_mmu(struct mmu_gather *tlb,
-        unsigned long start, unsigned long end, bool force);
-void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
-        unsigned long size);
-extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
-        int page_size);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
                                       unsigned long address,
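
Annotation (not part of the patch): struct mmu_gather now records which levels were cleared and whether page tables were freed, so an architecture-private tlb_flush() can pick its strategy from those bits. A minimal hypothetical consumer, kept close in shape to the generic default that appears further down:

static inline void example_arch_tlb_flush(struct mmu_gather *tlb)
{
        if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables) {
                /* page tables went away: also drop any paging-structure caches */
                flush_tlb_mm(tlb->mm);
        } else if (tlb->end) {
                struct vm_area_struct vma = { .vm_mm = tlb->mm };

                flush_tlb_range(&vma, tlb->start, tlb->end);
        }
}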
@@ -139,11 +322,107 @@
                 tlb->start = TASK_SIZE;
                 tlb->end = 0;
         }
+        tlb->freed_tables = 0;
+        tlb->cleared_ptes = 0;
+        tlb->cleared_pmds = 0;
+        tlb->cleared_puds = 0;
+        tlb->cleared_p4ds = 0;
+        /*
+         * Do not reset mmu_gather::vma_* fields here, we do not
+         * call into tlb_start_vma() again to set them if there is an
+         * intermediate flush.
+         */
 }
+
+#ifdef CONFIG_MMU_GATHER_NO_RANGE
+
+#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
+#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
+#endif
+
+/*
+ * When an architecture does not have efficient means of range flushing TLBs
+ * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
+ * range small. We equally don't have to worry about page granularity or other
+ * things.
+ *
+ * All we need to do is issue a full flush for any !0 range.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+        if (tlb->end)
+                flush_tlb_mm(tlb->mm);
+}
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#define tlb_end_vma tlb_end_vma
+static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#else /* CONFIG_MMU_GATHER_NO_RANGE */
+
+#ifndef tlb_flush
+
+#if defined(tlb_start_vma) || defined(tlb_end_vma)
+#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
+#endif
+
+/*
+ * When an architecture does not provide its own tlb_flush() implementation
+ * but does have a reasonably efficient flush_tlb_range() implementation,
+ * use that.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+        if (tlb->fullmm || tlb->need_flush_all) {
+                flush_tlb_mm(tlb->mm);
+        } else if (tlb->end) {
+                struct vm_area_struct vma = {
+                        .vm_mm = tlb->mm,
+                        .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
+                                    (tlb->vma_huge ? VM_HUGETLB : 0),
+                };
+
+                flush_tlb_range(&vma, tlb->start, tlb->end);
+        }
+}
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+        /*
+         * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
+         * mips-4k) flush only large pages.
+         *
+         * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
+         * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
+         * range.
+         *
+         * We rely on tlb_end_vma() to issue a flush, such that when we reset
+         * these values the batch is empty.
+         */
+        tlb->vma_huge = is_vm_hugetlb_page(vma);
+        tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
+}
+
+#else
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#endif
+
+#endif /* CONFIG_MMU_GATHER_NO_RANGE */
 
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-        if (!tlb->end)
+        /*
+         * Anything calling __tlb_adjust_range() also sets at least one of
+         * these bits.
+         */
+        if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
+              tlb->cleared_puds || tlb->cleared_p4ds))
                 return;
 
         tlb_flush(tlb);
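
Annotation (not part of the patch): the #ifndef tlb_flush above is how an architecture opts out of the generic implementation; it defines the macro in its own asm/tlb.h before including this header (the same shape x86's asm/tlb.h uses). Hypothetical sketch for a made-up architecture:

/* arch/<foo>/include/asm/tlb.h (hypothetical) */
struct mmu_gather;

#define tlb_flush tlb_flush
static inline void tlb_flush(struct mmu_gather *tlb);

#include <asm-generic/tlb.h>

static inline void tlb_flush(struct mmu_gather *tlb)
{
        /* arch-specific invalidation using tlb->start/end, freed_tables, ... */
        flush_tlb_mm(tlb->mm);
}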
@@ -172,21 +451,37 @@
         return tlb_remove_page_size(tlb, page, PAGE_SIZE);
 }
 
-#ifndef tlb_remove_check_page_size_change
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+static inline void tlb_change_page_size(struct mmu_gather *tlb,
                                         unsigned int page_size)
 {
-        /*
-         * We don't care about page size change, just update
-         * mmu_gather page size here so that debug checks
-         * doesn't throw false warning.
-         */
-#ifdef CONFIG_DEBUG_VM
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
+        if (tlb->page_size && tlb->page_size != page_size) {
+                if (!tlb->fullmm && !tlb->need_flush_all)
+                        tlb_flush_mmu(tlb);
+        }
+
         tlb->page_size = page_size;
 #endif
 }
-#endif
+
+static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
+{
+        if (tlb->cleared_ptes)
+                return PAGE_SHIFT;
+        if (tlb->cleared_pmds)
+                return PMD_SHIFT;
+        if (tlb->cleared_puds)
+                return PUD_SHIFT;
+        if (tlb->cleared_p4ds)
+                return P4D_SHIFT;
+
+        return PAGE_SHIFT;
+}
+
+static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
+{
+        return 1UL << tlb_get_unmap_shift(tlb);
+}
 
 /*
  * In the case of tlb vma handling, we can optimise these away in the
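
Annotation (not part of the patch): tlb_change_page_size() is meant to be called before queueing entries of a non-default granule, and tlb_get_unmap_shift()/tlb_get_unmap_size() let a flush recover that granule from the cleared_* bits. A hypothetical sketch; the example_* helper names are made up:

static inline void example_batch_pmd_granule(struct mmu_gather *tlb)
{
        /* may issue an intermediate tlb_flush_mmu() if the size changed */
        tlb_change_page_size(tlb, PMD_SIZE);
}

static inline unsigned long example_flush_stride(struct mmu_gather *tlb)
{
        /* smallest entry size unmapped so far: PAGE_SIZE, PMD_SIZE, ... */
        return tlb_get_unmap_size(tlb);
}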
@@ -194,18 +489,63 @@
  * the vmas are adjusted to only cover the region to be torn down.
  */
 #ifndef tlb_start_vma
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#endif
+static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+        if (tlb->fullmm)
+                return;
 
-#define __tlb_end_vma(tlb, vma) \
-        do { \
-                if (!tlb->fullmm) \
-                        tlb_flush_mmu_tlbonly(tlb); \
-        } while (0)
+        tlb_update_vma_flags(tlb, vma);
+        flush_cache_range(vma, vma->vm_start, vma->vm_end);
+}
+#endif
 
 #ifndef tlb_end_vma
-#define tlb_end_vma __tlb_end_vma
+static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+        if (tlb->fullmm)
+                return;
+
+        /*
+         * Do a TLB flush and reset the range at VMA boundaries; this avoids
+         * the ranges growing with the unused space between consecutive VMAs,
+         * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
+         * this.
+         */
+        tlb_flush_mmu_tlbonly(tlb);
+}
 #endif
+
+/*
+ * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
+ * and set corresponding cleared_*.
+ */
+static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
+                                       unsigned long address, unsigned long size)
+{
+        __tlb_adjust_range(tlb, address, size);
+        tlb->cleared_ptes = 1;
+}
+
+static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
+                                       unsigned long address, unsigned long size)
+{
+        __tlb_adjust_range(tlb, address, size);
+        tlb->cleared_pmds = 1;
+}
+
+static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
+                                       unsigned long address, unsigned long size)
+{
+        __tlb_adjust_range(tlb, address, size);
+        tlb->cleared_puds = 1;
+}
+
+static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
+                                       unsigned long address, unsigned long size)
+{
+        __tlb_adjust_range(tlb, address, size);
+        tlb->cleared_p4ds = 1;
+}
 
 #ifndef __tlb_remove_tlb_entry
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
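
Annotation (not part of the patch): the new tlb_flush_{pte,pmd,pud,p4d}_range() helpers are what record both the flush range and the level. A hypothetical per-PTE teardown step showing where they slot in (locking, pte_present()/vm_normal_page() checks and the surrounding walk are omitted; the tlb_remove_tlb_entry() macro below wraps the common case):

static inline void example_zap_one_pte(struct mmu_gather *tlb,
                                       struct vm_area_struct *vma,
                                       pte_t *ptep, unsigned long addr)
{
        pte_t pte = ptep_get_and_clear(vma->vm_mm, addr, ptep); /* 1) unhook */
        struct page *page = pte_page(pte);

        tlb_flush_pte_range(tlb, addr, PAGE_SIZE);      /* record range + level */
        if (__tlb_remove_page(tlb, page))               /* queue page; batch full? */
                tlb_flush_mmu(tlb);                     /* then flush and free now */
}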
@@ -220,14 +560,22 @@
  */
 #define tlb_remove_tlb_entry(tlb, ptep, address) \
         do { \
-                __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+                tlb_flush_pte_range(tlb, address, PAGE_SIZE); \
                 __tlb_remove_tlb_entry(tlb, ptep, address); \
         } while (0)
 
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
-        do { \
-                __tlb_adjust_range(tlb, address, huge_page_size(h)); \
-                __tlb_remove_tlb_entry(tlb, ptep, address); \
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+        do { \
+                unsigned long _sz = huge_page_size(h); \
+                if (_sz >= P4D_SIZE) \
+                        tlb_flush_p4d_range(tlb, address, _sz); \
+                else if (_sz >= PUD_SIZE) \
+                        tlb_flush_pud_range(tlb, address, _sz); \
+                else if (_sz >= PMD_SIZE) \
+                        tlb_flush_pmd_range(tlb, address, _sz); \
+                else \
+                        tlb_flush_pte_range(tlb, address, _sz); \
+                __tlb_remove_tlb_entry(tlb, ptep, address); \
         } while (0)
 
 /**
@@ -240,7 +588,7 @@
 
 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
         do { \
-                __tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \
+                tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE); \
                 __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
         } while (0)
 
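
Annotation (not part of the patch), a worked example of the effect: zapping one PMD-sized hugetlb page through tlb_remove_huge_tlb_entry() (huge_page_size(h) == PMD_SIZE) takes the "_sz >= PMD_SIZE" branch, so only cleared_pmds is set and a later tlb_get_unmap_shift() reports PMD_SHIFT rather than PAGE_SHIFT; on a 4K-page configuration that lets an architecture invalidate with a 2MiB stride instead of per-4K operations. tlb_remove_pmd_tlb_entry() above has the same effect for THP mappings.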
@@ -254,7 +602,7 @@
 
 #define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
         do { \
-                __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \
+                tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE); \
                 __tlb_remove_pud_tlb_entry(tlb, pudp, address); \
         } while (0)
 
@@ -279,7 +627,8 @@
 #ifndef pte_free_tlb
 #define pte_free_tlb(tlb, ptep, address) \
         do { \
-                __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+                tlb_flush_pmd_range(tlb, address, PAGE_SIZE); \
+                tlb->freed_tables = 1; \
                 __pte_free_tlb(tlb, ptep, address); \
         } while (0)
 #endif
@@ -287,31 +636,30 @@
 #ifndef pmd_free_tlb
 #define pmd_free_tlb(tlb, pmdp, address) \
         do { \
-                __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+                tlb_flush_pud_range(tlb, address, PAGE_SIZE); \
+                tlb->freed_tables = 1; \
                 __pmd_free_tlb(tlb, pmdp, address); \
         } while (0)
 #endif
 
-#ifndef __ARCH_HAS_4LEVEL_HACK
 #ifndef pud_free_tlb
 #define pud_free_tlb(tlb, pudp, address) \
         do { \
-                __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+                tlb_flush_p4d_range(tlb, address, PAGE_SIZE); \
+                tlb->freed_tables = 1; \
                 __pud_free_tlb(tlb, pudp, address); \
         } while (0)
 #endif
-#endif
 
-#ifndef __ARCH_HAS_5LEVEL_HACK
 #ifndef p4d_free_tlb
 #define p4d_free_tlb(tlb, pudp, address) \
         do { \
-                __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+                __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+                tlb->freed_tables = 1; \
                 __p4d_free_tlb(tlb, pudp, address); \
         } while (0)
 #endif
-#endif
 
-#define tlb_migrate_finish(mm) do {} while (0)
+#endif /* CONFIG_MMU */
 
 #endif /* _ASM_GENERIC__TLB_H */