```diff
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2017 SiFive
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 
-#include <asm/pgtable.h>
 #include <asm/cacheflush.h>
```
|---|
```diff
+#ifdef CONFIG_SMP
+
+#include <asm/sbi.h>
+
+static void ipi_remote_fence_i(void *info)
+{
+	return local_flush_icache_all();
+}
+
+void flush_icache_all(void)
+{
+	local_flush_icache_all();
+
+	if (IS_ENABLED(CONFIG_RISCV_SBI))
+		sbi_remote_fence_i(NULL);
+	else
+		on_each_cpu(ipi_remote_fence_i, NULL, 1);
+}
+EXPORT_SYMBOL(flush_icache_all);
```
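The split above matters to callers: `local_flush_icache_all()` is just a `fence.i`, which only orders instruction fetch on the hart that executes it, so any code that writes instructions another hart might run has to use the global flush (SBI `remote_fence_i` when available, otherwise a plain IPI). A minimal sketch of such a caller; `install_insns()` and its buffers are hypothetical illustration, not part of this patch:

```c
#include <linux/string.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical helper (not from the patch): copy new instructions into
 * an executable buffer and make them visible to instruction fetch on
 * every hart.
 */
static void install_insns(void *text, const void *insns, size_t len)
{
	memcpy(text, insns, len);	/* stores land in the D$/memory */
	flush_icache_all();		/* local fence.i + remote SBI/IPI */
}
```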
|---|
```diff
+/*
+ * Performs an icache flush for the given MM context.  RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+ * IPIs for harts that are not currently executing a MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+	unsigned int cpu;
+	cpumask_t others, *mask;
+
+	preempt_disable();
+
+	/* Mark every hart's icache as needing a flush for this MM. */
+	mask = &mm->context.icache_stale_mask;
+	cpumask_setall(mask);
+	/* Flush this hart's I$ now, and mark it as flushed. */
+	cpu = smp_processor_id();
+	cpumask_clear_cpu(cpu, mask);
+	local_flush_icache_all();
+
+	/*
+	 * Flush the I$ of other harts concurrently executing, and mark them as
+	 * flushed.
+	 */
+	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+	local |= cpumask_empty(&others);
+	if (mm == current->active_mm && local) {
+		/*
+		 * It's assumed that at least one strongly ordered operation is
+		 * performed on this hart between setting a hart's cpumask bit
+		 * and scheduling this MM context on that hart.  Sending an SBI
+		 * remote message will do this, but in the case where no
+		 * messages are sent we still need to order this hart's writes
+		 * with flush_icache_deferred().
+		 */
+		smp_mb();
+	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
+		cpumask_t hartid_mask;
+
+		riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
+		sbi_remote_fence_i(cpumask_bits(&hartid_mask));
+	} else {
+		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
+	}
+
+	preempt_enable();
+}
+
+#endif /* CONFIG_SMP */
```
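The deferred half of the scheme described in the comment lives on the context-switch path: before a hart resumes an mm whose bit is still set in `icache_stale_mask`, it clears its bit and flushes locally, with a barrier pairing against the `smp_mb()` above. In the mainline kernel this helper is `flush_icache_deferred()`, called from `switch_mm()`; the sketch below is written from memory and is not part of this patch:

```c
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>

/*
 * Sketch of the deferred flush referenced by the smp_mb() comment in
 * flush_icache_mm(): runs before this hart resumes executing @mm.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * Pairs with the smp_mb() in flush_icache_mm().
		 */
		smp_mb();
		local_flush_icache_all();
	}
#endif
}
```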
|---|
```diff
+#ifdef CONFIG_MMU
 void flush_icache_pte(pte_t pte)
 {
 	struct page *page = pte_page(pte);
 
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		flush_icache_all();
 }
+#endif /* CONFIG_MMU */
```
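For context on `flush_icache_pte()`: `PG_dcache_clean` acts as a per-page "icache already synchronized" flag, so the global flush runs at most once per page until the flag is cleared again. The caller side sits in the RISC-V `set_pte_at()` and, from memory, looks roughly like the following (not part of this patch):

```c
/*
 * Rough sketch of the call site in arch/riscv/include/asm/pgtable.h:
 * only present, executable mappings need the icache synchronized.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}
```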