+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Based on arch/arm/include/asm/tlbflush.h
  *
  * Copyright (C) 1999-2003 Russell King
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
 #ifndef __ASM_TLBFLUSH_H
 #define __ASM_TLBFLUSH_H

 #ifndef __ASSEMBLY__

+#include <linux/bitfield.h>
+#include <linux/mm_types.h>
 #include <linux/sched.h>
 #include <asm/cputype.h>
 #include <asm/mmu.h>
---|
@@ ... @@
  * not. The macros handle invoking the asm with or without the
  * register argument as appropriate.
  */
-#define __TLBI_0(op, arg)	asm ("tlbi " #op "\n"			\
+#define __TLBI_0(op, arg)	asm (ARM64_ASM_PREAMBLE			\
+				     "tlbi " #op "\n"			\
 		   ALTERNATIVE("nop\n nop",				\
 			       "dsb ish\n tlbi " #op,			\
 			       ARM64_WORKAROUND_REPEAT_TLBI,		\
-			       CONFIG_QCOM_FALKOR_ERRATUM_1009)		\
+			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	\
 			    : : )

-#define __TLBI_1(op, arg)	asm ("tlbi " #op ", %0\n"		\
+#define __TLBI_1(op, arg)	asm (ARM64_ASM_PREAMBLE			\
+				     "tlbi " #op ", %0\n"		\
 		   ALTERNATIVE("nop\n nop",				\
 			       "dsb ish\n tlbi " #op ", %0",		\
 			       ARM64_WORKAROUND_REPEAT_TLBI,		\
-			       CONFIG_QCOM_FALKOR_ERRATUM_1009)		\
+			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	\
 			    : : "r" (arg))

 #define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)
---|
@@ ... @@
 })

 /*
- * TLB Management
- * ==============
+ * Get the translation granule of the system, which is decided by
+ * PAGE_SIZE. Used by TTL.
+ *  - 4KB  : 1
+ *  - 16KB : 2
+ *  - 64KB : 3
+ */
+#define TLBI_TTL_TG_4K		1
+#define TLBI_TTL_TG_16K		2
+#define TLBI_TTL_TG_64K		3
+
+static inline unsigned long get_trans_granule(void)
+{
+	switch (PAGE_SIZE) {
+	case SZ_4K:
+		return TLBI_TTL_TG_4K;
+	case SZ_16K:
+		return TLBI_TTL_TG_16K;
+	case SZ_64K:
+		return TLBI_TTL_TG_64K;
+	default:
+		return 0;
+	}
+}
---|
+
+/*
+ * Level-based TLBI operations.
  *
- * The TLB specific code is expected to perform whatever tests it needs
- * to determine if it should invalidate the TLB for each call. Start
- * addresses are inclusive and end addresses are exclusive; it is safe to
- * round these addresses down.
+ * When ARMv8.4-TTL exists, TLBI operations take an additional hint for
+ * the level at which the invalidation must take place. If the level is
+ * wrong, no invalidation may take place. In the case where the level
+ * cannot be easily determined, a 0 value for the level parameter will
+ * perform a non-hinted invalidation.
+ *
+ * For Stage-2 invalidation, use the level values provided to that effect
+ * in asm/stage2_pgtable.h.
+ */
+#define TLBI_TTL_MASK		GENMASK_ULL(47, 44)
+
+#define __tlbi_level(op, addr, level) do {			\
+	u64 arg = addr;						\
+								\
+	if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) &&	\
+	    level) {						\
+		u64 ttl = level & 3;				\
+		ttl |= get_trans_granule() << 2;		\
+		arg &= ~TLBI_TTL_MASK;				\
+		arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);		\
+	}							\
+								\
+	__tlbi(op, arg);					\
+} while (0)
+
+#define __tlbi_user_level(op, arg, level) do {			\
+	if (arm64_kernel_unmapped_at_el0())			\
+		__tlbi_level(op, (arg | USER_ASID_FLAG), level);\
+} while (0)
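
To make the new helper concrete, here is a hypothetical call site (a sketch, not code from the patch; the function name and values are invented). It invalidates a leaf entry with a level-3 hint, just as __flush_tlb_range() does further down:

```c
/*
 * Sketch only: flush the last-level (level-3) entry mapping 'uaddr'
 * under 'asid'. With ARMv8.4-TTL present, the hint lets the CPU skip
 * entries cached for other levels; passing level 0 falls back to a
 * plain, unhinted invalidation. Barriers are the caller's job, as in
 * the flush_tlb_*() routines below.
 */
static inline void example_flush_leaf(unsigned long uaddr, unsigned long asid)
{
	unsigned long arg = __TLBI_VADDR(uaddr, asid);

	__tlbi_level(vale1is, arg, 3);		/* kernel/global ASID     */
	__tlbi_user_level(vale1is, arg, 3);	/* KPTI user-ASID mirror  */
}
```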
---|
+
+/*
+ * This macro creates a properly formatted VA operand for the TLB RANGE.
+ * The value bit assignments are:
+ *
+ * +----------+------+-------+-------+-------+----------------------+
+ * |   ASID   |  TG  | SCALE |  NUM  |  TTL  |        BADDR         |
+ * +-----------------+-------+-------+-------+----------------------+
+ * |63      48|47  46|45   44|43   39|38   37|36                   0|
+ *
+ * The address range is determined by the formula below:
+ * [BADDR, BADDR + (NUM + 1) * 2^(5*SCALE + 1) * PAGESIZE)
+ *
+ */
+#define __TLBI_VADDR_RANGE(addr, asid, scale, num, ttl)		\
+	({							\
+		unsigned long __ta = (addr) >> PAGE_SHIFT;	\
+		__ta &= GENMASK_ULL(36, 0);			\
+		__ta |= (unsigned long)(ttl) << 37;		\
+		__ta |= (unsigned long)(num) << 39;		\
+		__ta |= (unsigned long)(scale) << 44;		\
+		__ta |= get_trans_granule() << 46;		\
+		__ta |= (unsigned long)(asid) << 48;		\
+		__ta;						\
+	})
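
Plugging in concrete numbers shows how the fields pack. A worked sketch (hypothetical values; assumes 4 KiB pages, so get_trans_granule() returns 1):

```c
/* Builds the same operand as __TLBI_VADDR_RANGE(0x400000, 42, 1, 7, 3). */
static inline unsigned long example_range_operand(void)
{
	unsigned long ta = 0x400000UL >> 12;	/* BADDR -> bits [36:0]   */

	ta |= 3UL  << 37;			/* TTL   = 3 (leaf level) */
	ta |= 7UL  << 39;			/* NUM   = 7              */
	ta |= 1UL  << 44;			/* SCALE = 1              */
	ta |= 1UL  << 46;			/* TG    = 1 (4 KiB)      */
	ta |= 42UL << 48;			/* ASID  = 42             */

	/*
	 * Describes [0x400000, 0x400000 + (7+1) * 2^(5*1+1) pages),
	 * i.e. 512 pages = 2 MiB with 4 KiB pages.
	 */
	return ta;
}
```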
---|
+
+/* These macros are used by the TLBI RANGE feature. */
+#define __TLBI_RANGE_PAGES(num, scale)	\
+	((unsigned long)((num) + 1) << (5 * (scale) + 1))
+#define MAX_TLBI_RANGE_PAGES		__TLBI_RANGE_PAGES(31, 3)
+
+/*
+ * Generate 'num' values from -1 to 30 with -1 rejected by the
+ * __flush_tlb_range() loop below.
+ */
+#define TLBI_RANGE_MASK			GENMASK_ULL(4, 0)
+#define __TLBI_RANGE_NUM(pages, scale)	\
+	((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
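
Concretely: MAX_TLBI_RANGE_PAGES = __TLBI_RANGE_PAGES(31, 3) = (31 + 1) << (5*3 + 1) = 32 << 16 = 2097152 pages, i.e. 8 GiB with 4 KiB pages. Going the other way, for pages = 512: scale 0 gives ((512 >> 1) & 0x1f) - 1 = -1, which the loop rejects, while scale 1 gives (512 >> 6) - 1 = 7, so a single range operation with num = 7 and scale = 1 covers all 512 pages.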
---|
+
+/*
+ * TLB Invalidation
+ * ================
+ *
+ * This header file implements the low-level TLB invalidation routines
+ * (sometimes referred to as "flushing" in the kernel) for arm64.
+ *
+ * Every invalidation operation uses the following template:
+ *
+ *	DSB ISHST	// Ensure prior page-table updates have completed
+ *	TLBI ...	// Invalidate the TLB
+ *	DSB ISH		// Ensure the TLB invalidation has completed
+ *	if (invalidated kernel mappings)
+ *		ISB	// Discard any instructions fetched from the old mapping
+ *
+ *
+ * The following functions form part of the "core" TLB invalidation API,
+ * as documented in Documentation/core-api/cachetlb.rst:
  *
  *	flush_tlb_all()
- *
- *		Invalidate the entire TLB.
+ *		Invalidate the entire TLB (kernel + user) on all CPUs
  *
  *	flush_tlb_mm(mm)
+ *		Invalidate an entire user address space on all CPUs.
+ *		The 'mm' argument identifies the ASID to invalidate.
  *
- *		Invalidate all TLB entries in a particular address space.
- *		- mm - mm_struct describing address space
+ *	flush_tlb_range(vma, start, end)
+ *		Invalidate the virtual-address range '[start, end)' on all
+ *		CPUs for the user address space corresponding to 'vma->mm'.
+ *		Note that this operation also invalidates any walk-cache
+ *		entries associated with translations for the specified address
+ *		range.
  *
- *	flush_tlb_range(mm,start,end)
+ *	flush_tlb_kernel_range(start, end)
+ *		Same as flush_tlb_range(..., start, end), but applies to
+ *		kernel mappings rather than a particular user address space.
+ *		Whilst not explicitly documented, this function is used when
+ *		unmapping pages from vmalloc/io space.
  *
- *		Invalidate a range of TLB entries in the specified address
- *		space.
- *		- mm - mm_struct describing address space
- *		- start - start address (may not be aligned)
- *		- end - end address (exclusive, may not be aligned)
+ *	flush_tlb_page(vma, addr)
+ *		Invalidate a single user mapping for address 'addr' in the
+ *		address space corresponding to 'vma->mm'. Note that this
+ *		operation only invalidates a single, last-level page-table
+ *		entry and therefore does not affect any walk-caches.
  *
- *	flush_tlb_page(vaddr,vma)
  *
- *		Invalidate the specified page in the specified address range.
- *		- vaddr - virtual address (may not be aligned)
- *		- vma - vma_struct describing address range
+ * Next, we have some undocumented invalidation routines that you probably
+ * don't want to call unless you know what you're doing:
  *
- *	flush_kern_tlb_page(kaddr)
+ *	local_flush_tlb_all()
+ *		Same as flush_tlb_all(), but only applies to the calling CPU.
  *
- *		Invalidate the TLB entry for the specified page. The address
- *		will be in the kernels virtual memory space. Current uses
- *		only require the D-TLB to be invalidated.
- *		- kaddr - Kernel virtual memory address
+ *	__flush_tlb_kernel_pgtable(addr)
+ *		Invalidate a single kernel mapping for address 'addr' on all
+ *		CPUs, ensuring that any walk-cache entries associated with the
+ *		translation are also invalidated.
+ *
+ *	__flush_tlb_range(vma, start, end, stride, last_level)
+ *		Invalidate the virtual-address range '[start, end)' on all
+ *		CPUs for the user address space corresponding to 'vma->mm'.
+ *		The invalidation operations are issued at a granularity
+ *		determined by 'stride' and only affect any walk-cache entries
+ *		if 'last_level' is false.
+ *
+ *
+ * Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
+ * on top of these routines, since that is our interface to the mmu_gather
+ * API as used by munmap() and friends.
  */
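
Before diving into the individual routines, the template above can be spelled out directly. A minimal sketch (illustrative only, not code from this patch; vaale1is is the leaf-only, inner-shareable kernel invalidation also used by flush_tlb_kernel_range() below):

```c
static inline void example_invalidate_kernel_page(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);	/* ASID 0: kernel */

	dsb(ishst);		/* prior page-table updates are visible   */
	__tlbi(vaale1is, addr);	/* invalidate, broadcast inner-shareable  */
	dsb(ish);		/* wait for the invalidation to complete  */
	isb();			/* kernel mapping: resync the pipeline    */
}
```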
---|
 static inline void local_flush_tlb_all(void)
 {
@@ ... @@
---|

 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	unsigned long asid = __TLBI_VADDR(0, ASID(mm));
+	unsigned long asid;

 	dsb(ishst);
+	asid = __TLBI_VADDR(0, ASID(mm));
 	__tlbi(aside1is, asid);
 	__tlbi_user(aside1is, asid);
 	dsb(ish);
 }
---|

+static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
+					 unsigned long uaddr)
+{
+	unsigned long addr;
+
+	dsb(ishst);
+	addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
+	__tlbi(vale1is, addr);
+	__tlbi_user(vale1is, addr);
+}
+
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long uaddr)
 {
-	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
-
-	dsb(ishst);
-	__tlbi(vale1is, addr);
-	__tlbi_user(vale1is, addr);
+	flush_tlb_page_nosync(vma, uaddr);
 	dsb(ish);
 }
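
The point of splitting out flush_tlb_page_nosync() is batching: a caller can issue several invalidations and pay for the completion barrier only once. A hypothetical sketch (the function is invented for illustration):

```c
static inline void example_flush_two_pages(struct vm_area_struct *vma,
					   unsigned long a, unsigned long b)
{
	flush_tlb_page_nosync(vma, a);
	flush_tlb_page_nosync(vma, b);
	dsb(ish);	/* one DSB ISH completes both TLBIs */
}
```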
---|

@@ ... @@
  * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
  * necessarily a performance improvement.
  */
-#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
+#define MAX_TLBI_OPS	PTRS_PER_PTE
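
For scale: with 4 KiB pages, PTRS_PER_PTE is 512, so the fallback threshold drops from the old fixed MAX_TLB_RANGE of 1024 pages (4 MiB) to 512 per-page operations; a PAGE_SIZE-stride range of 2 MiB or more now flushes the whole ASID instead.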
---|

 static inline void __flush_tlb_range(struct vm_area_struct *vma,
 				     unsigned long start, unsigned long end,
-				     bool last_level)
+				     unsigned long stride, bool last_level,
+				     int tlb_level)
 {
-	unsigned long asid = ASID(vma->vm_mm);
-	unsigned long addr;
+	int num = 0;
+	int scale = 0;
+	unsigned long asid, addr, pages;

-	if ((end - start) > MAX_TLB_RANGE) {
+	start = round_down(start, stride);
+	end = round_up(end, stride);
+	pages = (end - start) >> PAGE_SHIFT;
+
+	/*
+	 * When not using TLB range ops, we can handle up to
+	 * (MAX_TLBI_OPS - 1) pages;
+	 * when using TLB range ops, we can handle up to
+	 * (MAX_TLBI_RANGE_PAGES - 1) pages.
+	 */
+	if ((!system_supports_tlb_range() &&
+	     (end - start) >= (MAX_TLBI_OPS * stride)) ||
+	    pages >= MAX_TLBI_RANGE_PAGES) {
 		flush_tlb_mm(vma->vm_mm);
 		return;
 	}

---|
-	start = __TLBI_VADDR(start, asid);
-	end = __TLBI_VADDR(end, asid);
-
 	dsb(ishst);
-	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
-		if (last_level) {
-			__tlbi(vale1is, addr);
-			__tlbi_user(vale1is, addr);
-		} else {
-			__tlbi(vae1is, addr);
-			__tlbi_user(vae1is, addr);
+	asid = ASID(vma->vm_mm);
+
+	/*
+	 * When the CPU does not support TLB range operations, flush the TLB
+	 * entries one by one at the granularity of 'stride'. If the TLB
+	 * range ops are supported, then:
+	 *
+	 * 1. If 'pages' is odd, flush the first page through non-range
+	 *    operations;
+	 *
+	 * 2. For remaining pages: the minimum range granularity is decided
+	 *    by 'scale', so multiple range TLBI operations may be required.
+	 *    Start from scale = 0, flush the corresponding number of pages
+	 *    ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
+	 *    until no pages are left.
+	 *
+	 * Note that certain ranges can be represented by either num = 31 and
+	 * scale or num = 0 and scale + 1. The loop below favours the latter
+	 * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
+	 */
+	while (pages > 0) {
+		if (!system_supports_tlb_range() ||
+		    pages % 2 == 1) {
+			addr = __TLBI_VADDR(start, asid);
+			if (last_level) {
+				__tlbi_level(vale1is, addr, tlb_level);
+				__tlbi_user_level(vale1is, addr, tlb_level);
+			} else {
+				__tlbi_level(vae1is, addr, tlb_level);
+				__tlbi_user_level(vae1is, addr, tlb_level);
+			}
+			start += stride;
+			pages -= stride >> PAGE_SHIFT;
+			continue;
 		}
+
+		num = __TLBI_RANGE_NUM(pages, scale);
+		if (num >= 0) {
+			addr = __TLBI_VADDR_RANGE(start, asid, scale,
+						  num, tlb_level);
+			if (last_level) {
+				__tlbi(rvale1is, addr);
+				__tlbi_user(rvale1is, addr);
+			} else {
+				__tlbi(rvae1is, addr);
+				__tlbi_user(rvae1is, addr);
+			}
+			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;
+			pages -= __TLBI_RANGE_PAGES(num, scale);
+		}
+		scale++;
 	}
 	dsb(ish);
 }
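
To see the decomposition at work, here is a small stand-alone user-space model of the loop above (a sketch assuming range ops are supported and 4 KiB pages; the helper names are invented):

```c
#include <stdio.h>

/* Mirror __TLBI_RANGE_NUM() and __TLBI_RANGE_PAGES() from the patch. */
static long range_num(unsigned long pages, int scale)
{
	return (long)((pages >> (5 * scale + 1)) & 0x1f) - 1;
}

static unsigned long range_pages(long num, int scale)
{
	return (unsigned long)(num + 1) << (5 * scale + 1);
}

int main(void)
{
	unsigned long pages = 199;	/* arbitrary example */
	int scale = 0;

	while (pages > 0) {
		if (pages % 2 == 1) {		/* odd residue: plain TLBI */
			printf("plain TLBI: 1 page\n");
			pages -= 1;
			continue;
		}

		long num = range_num(pages, scale);
		if (num >= 0) {
			printf("range TLBI: scale=%d num=%ld -> %lu pages\n",
			       scale, num, range_pages(num, scale));
			pages -= range_pages(num, scale);
		}
		scale++;
	}
	return 0;	/* prints: 1 page, then 6 pages, then 192 pages */
}
```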
---|
@@ ... @@
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
-	__flush_tlb_range(vma, start, end, false);
+	/*
+	 * We cannot use leaf-only invalidation here, since we may be
+	 * invalidating table entries as part of collapsing hugepages or
+	 * moving page tables. Set the tlb_level to 0 because we cannot get
+	 * enough information here.
+	 */
+	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
 }
---|

 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long addr;

-	if ((end - start) > MAX_TLB_RANGE) {
+	if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
 		flush_tlb_all();
 		return;
 	}
@@ ... @@

 	dsb(ishst);
 	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-		__tlbi(vaae1is, addr);
+		__tlbi(vaale1is, addr);
 	dsb(ish);
 	isb();
 }
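
Note the loop stride: 1 << (PAGE_SHIFT - 12) is a step in operand space, not in virtual addresses. __TLBI_VADDR() (defined in a hunk elided above) packs VA >> 12 into the low bits of the operand, so advancing by one page means adding PAGE_SIZE >> 12 there: 1 for 4 KiB pages, 16 for 64 KiB pages.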
---|
@@ ... @@
  * Used to invalidate the TLB (walk caches) corresponding to intermediate page
  * table levels (pgd/pud/pmd).
  */
-static inline void __flush_tlb_pgtable(struct mm_struct *mm,
-				       unsigned long uaddr)
-{
-	unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));
-
-	__tlbi(vae1is, addr);
-	__tlbi_user(vae1is, addr);
-	dsb(ish);
-}
-
 static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
 {
 	unsigned long addr = __TLBI_VADDR(kaddr, 0);

+	dsb(ishst);
 	__tlbi(vaae1is, addr);
 	dsb(ish);
 	isb();
---|