@@ -6,23 +6,31 @@
 #define tlb_end_vma(tlb, vma) do { } while (0)
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 
-#define tlb_flush(tlb)						\
-{								\
-	if (!tlb->fullmm && !tlb->need_flush_all)		\
-		flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
-	else							\
-		flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
-}
+#define tlb_flush tlb_flush
+static inline void tlb_flush(struct mmu_gather *tlb);
 
 #include <asm-generic/tlb.h>
+
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+	unsigned long start = 0UL, end = TLB_FLUSH_ALL;
+	unsigned int stride_shift = tlb_get_unmap_shift(tlb);
+
+	if (!tlb->fullmm && !tlb->need_flush_all) {
+		start = tlb->start;
+		end = tlb->end;
+	}
+
+	flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
+}
 
 /*
  * While x86 architecture in general requires an IPI to perform TLB
  * shootdown, enablement code for several hypervisors overrides
  * .flush_tlb_others hook in pv_mmu_ops and implements it by issuing
  * a hypercall. To keep software pagetable walkers safe in this case we
- * switch to RCU based table free (HAVE_RCU_TABLE_FREE). See the comment
- * below 'ifdef CONFIG_HAVE_RCU_TABLE_FREE' in include/asm-generic/tlb.h
+ * switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the comment
+ * below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
+ * for more details.
+ */
 static inline void __tlb_remove_table(void *table)