..
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Based on arch/arm/mm/context.c
  *
  * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

+#include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
..
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;

+static unsigned long max_pinned_asids;
+static unsigned long nr_pinned_asids;
+static unsigned long *pinned_asid_map;
+
 #define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
 #define ASID_FIRST_VERSION	(1UL << asid_bits)

-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
-#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
-#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
-#else
-#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
+#define NUM_USER_ASIDS		ASID_FIRST_VERSION
 #define asid2idx(asid)		((asid) & ~ASID_MASK)
 #define idx2asid(idx)		asid2idx(idx)
-#endif

 /* Get the ASIDBits supported by the current CPU */
 static u32 get_cpu_asid_bits(void)
..
 	default:
 		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
 			smp_processor_id(), fld);
-		/* Fallthrough */
+		fallthrough;
 	case 0:
 		asid = 8;
 		break;
..
 	}
 }

-static void flush_context(unsigned int cpu)
+static void set_kpti_asid_bits(unsigned long *map)
+{
+	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
+	/*
+	 * In case of KPTI, kernel/user ASIDs are allocated in
+	 * pairs; the bottom bit distinguishes the two: if it
+	 * is set, then the ASID will map only userspace. Thus
+	 * mark the even ASIDs as reserved for the kernel.
+	 */
+	memset(map, 0xaa, len);
+}
+
+static void set_reserved_asid_bits(void)
+{
+	if (pinned_asid_map)
+		bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS);
+	else if (arm64_kernel_unmapped_at_el0())
+		set_kpti_asid_bits(asid_map);
+	else
+		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+}
+
+#define asid_gen_match(asid) \
+	(!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))
+
+static void flush_context(void)
 {
 	int i;
 	u64 asid;

 	/* Update the list of reserved ASIDs and the ASID bitmap. */
-	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+	set_reserved_asid_bits();

 	for_each_possible_cpu(i) {
 		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
..
 	return hit;
 }

-static u64 new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm)
 {
 	static u32 cur_idx = 1;
 	u64 asid = atomic64_read(&mm->context.id);
..
 		 * can continue to use it and this was just a false alarm.
 		 */
 		if (check_update_reserved_asid(asid, newasid))
+			return newasid;
+
+		/*
+		 * If it is pinned, we can keep using it. Note that reserved
+		 * takes priority, because even if it is also pinned, we need
+		 * to update the generation in reserved_asids.
+		 */
+		if (refcount_read(&mm->context.pinned))
 			return newasid;

 		/*
..
 	/* We're out of ASIDs, so increment the global generation count */
 	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
 						 &asid_generation);
-	flush_context(cpu);
+	flush_context();

 	/* We have more ASIDs than CPUs, so this will always succeed */
 	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
..
 	return idx2asid(asid) | generation;
 }

-void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
+void check_and_switch_context(struct mm_struct *mm)
 {
 	unsigned long flags;
+	unsigned int cpu;
 	u64 asid, old_active_asid;
+
+	if (system_supports_cnp())
+		cpu_set_reserved_ttbr0();

 	asid = atomic64_read(&mm->context.id);

..
 	 * relaxed xchg in flush_context will treat us as reserved
 	 * because atomic RmWs are totally ordered for a given location.
 	 */
-	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
-	if (old_active_asid &&
-	    !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
-	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
+	old_active_asid = atomic64_read(this_cpu_ptr(&active_asids));
+	if (old_active_asid && asid_gen_match(asid) &&
+	    atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_asids),
 				     old_active_asid, asid))
 		goto switch_mm_fastpath;

 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
 	/* Check that our ASID belongs to the current generation. */
 	asid = atomic64_read(&mm->context.id);
-	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
-		asid = new_context(mm, cpu);
+	if (!asid_gen_match(asid)) {
+		asid = new_context(mm);
 		atomic64_set(&mm->context.id, asid);
 	}

+	cpu = smp_processor_id();
 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
 		local_flush_tlb_all();

-	atomic64_set(&per_cpu(active_asids, cpu), asid);
+	atomic64_set(this_cpu_ptr(&active_asids), asid);
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

 switch_mm_fastpath:
..
 	cpu_switch_mm(mm->pgd, mm);
 }

+unsigned long arm64_mm_context_get(struct mm_struct *mm)
+{
+	unsigned long flags;
+	u64 asid;
+
+	if (!pinned_asid_map)
+		return 0;
+
+	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+
+	asid = atomic64_read(&mm->context.id);
+
+	if (refcount_inc_not_zero(&mm->context.pinned))
+		goto out_unlock;
+
+	if (nr_pinned_asids >= max_pinned_asids) {
+		asid = 0;
+		goto out_unlock;
+	}
+
+	if (!asid_gen_match(asid)) {
+		/*
+		 * We went through one or more rollovers since that ASID was
+		 * used. Ensure that it is still valid, or generate a new one.
+		 */
+		asid = new_context(mm);
+		atomic64_set(&mm->context.id, asid);
+	}
+
+	nr_pinned_asids++;
+	__set_bit(asid2idx(asid), pinned_asid_map);
+	refcount_set(&mm->context.pinned, 1);
+
+out_unlock:
+	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+
+	asid &= ~ASID_MASK;
+
+	/* Set the equivalent of USER_ASID_BIT */
+	if (asid && arm64_kernel_unmapped_at_el0())
+		asid |= 1;
+
+	return asid;
+}
+EXPORT_SYMBOL_GPL(arm64_mm_context_get);
+
+void arm64_mm_context_put(struct mm_struct *mm)
+{
+	unsigned long flags;
+	u64 asid = atomic64_read(&mm->context.id);
+
+	if (!pinned_asid_map)
+		return;
+
+	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+
+	if (refcount_dec_and_test(&mm->context.pinned)) {
+		__clear_bit(asid2idx(asid), pinned_asid_map);
+		nr_pinned_asids--;
+	}
+
+	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+EXPORT_SYMBOL_GPL(arm64_mm_context_put);
+
 /* Errata workaround post TTBRx_EL1 update. */
 asmlinkage void post_ttbr_update_workaround(void)
 {
+	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456))
+		return;
+
 	asm(ALTERNATIVE("nop; nop; nop",
 			"ic iallu; dsb nsh; isb",
-			ARM64_WORKAROUND_CAVIUM_27456,
-			CONFIG_CAVIUM_ERRATUM_27456));
+			ARM64_WORKAROUND_CAVIUM_27456));
 }

-static int asids_init(void)
+void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
 {
-	asid_bits = get_cpu_asid_bits();
+	unsigned long ttbr1 = read_sysreg(ttbr1_el1);
+	unsigned long asid = ASID(mm);
+	unsigned long ttbr0 = phys_to_ttbr(pgd_phys);
+
+	/* Skip CNP for the reserved ASID */
+	if (system_supports_cnp() && asid)
+		ttbr0 |= TTBR_CNP_BIT;
+
+	/* SW PAN needs a copy of the ASID in TTBR0 for entry */
+	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
+		ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);
+
+	/* Set ASID in TTBR1 since TCR.A1 is set */
+	ttbr1 &= ~TTBR_ASID_MASK;
+	ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);
+
+	write_sysreg(ttbr1, ttbr1_el1);
+	isb();
+	write_sysreg(ttbr0, ttbr0_el1);
+	isb();
+	post_ttbr_update_workaround();
+}
+
+static int asids_update_limit(void)
+{
+	unsigned long num_available_asids = NUM_USER_ASIDS;
+
+	if (arm64_kernel_unmapped_at_el0()) {
+		num_available_asids /= 2;
+		if (pinned_asid_map)
+			set_kpti_asid_bits(pinned_asid_map);
+	}
 	/*
 	 * Expect allocation after rollover to fail if we don't have at least
 	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
 	 */
-	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
+	WARN_ON(num_available_asids - 1 <= num_possible_cpus());
+	pr_info("ASID allocator initialised with %lu entries\n",
+		num_available_asids);
+
+	/*
+	 * There must always be an ASID available after rollover. Ensure that,
+	 * even if all CPUs have a reserved ASID and the maximum number of ASIDs
+	 * are pinned, there still is at least one empty slot in the ASID map.
+	 */
+	max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
+	return 0;
+}
+arch_initcall(asids_update_limit);
+
+static int asids_init(void)
+{
+	asid_bits = get_cpu_asid_bits();
 	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
 	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
 			   GFP_KERNEL);
..
 		panic("Failed to allocate bitmap for %lu ASIDs\n",
 		      NUM_USER_ASIDS);

-	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
+	pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS),
+				  sizeof(*pinned_asid_map), GFP_KERNEL);
+	nr_pinned_asids = 0;
+
+	/*
+	 * We cannot call set_reserved_asid_bits() here because CPU
+	 * caps are not finalized yet, so it is safer to assume KPTI
+	 * and reserve the kernel ASIDs from the beginning.
+	 */
+	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+		set_kpti_asid_bits(asid_map);
 	return 0;
 }
 early_initcall(asids_init);
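
For context on how the interface added above is meant to be consumed, the sketch below shows one possible caller. It is illustrative only and not part of the patch: arm64_mm_context_get() and arm64_mm_context_put() are the functions introduced in the diff, while the wrapper names (example_bind_mm/example_unbind_mm) and the header locations are assumptions.

/* Illustrative sketch only -- not part of the patch above. */
#include <linux/mm_types.h>
#include <asm/mmu_context.h>	/* assumed home of the arm64_mm_context_*() declarations */

/* Hypothetical helper: pin the ASID of @mm before sharing it with a device. */
static unsigned long example_bind_mm(struct mm_struct *mm)
{
	unsigned long asid;

	/* Returns 0 when pinning is unavailable or the pin limit is reached. */
	asid = arm64_mm_context_get(mm);
	if (!asid)
		return 0;

	/*
	 * Per the diff, the returned value already carries the equivalent of
	 * USER_ASID_BIT when KPTI is enabled, so it can be handed to hardware
	 * that shares the CPU page tables.
	 */
	return asid;
}

/* Hypothetical helper: drop the pin once the device no longer uses @mm. */
static void example_unbind_mm(struct mm_struct *mm)
{
	arm64_mm_context_put(mm);
}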