.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * TLB Management (flush/create/diagnostics) for ARC700 |
---|
3 | 4 | * |
---|
4 | 5 | * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) |
---|
5 | | - * |
---|
6 | | - * This program is free software; you can redistribute it and/or modify |
---|
7 | | - * it under the terms of the GNU General Public License version 2 as |
---|
8 | | - * published by the Free Software Foundation. |
---|
9 | 6 | * |
---|
10 | 7 | * vineetg: Aug 2011 |
---|
11 | 8 | * -Reintroduce duplicate PD fixup - some customer chips still have the issue |
---|
.. | .. |
---|
33 | 30 | * -Changes related to MMU v2 (Rel 4.8) |
---|
34 | 31 | * |
---|
35 | 32 | * Vineetg: Aug 29th 2008 |
---|
36 | | - * -In TLB Flush operations (Metal Fix MMU) there is a explict command to |
---|
| 33 | + * -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
---|
37 | 34 | * flush Micro-TLBS. If TLB Index Reg is invalid prior to TLBIVUTLB cmd, |
---|
38 | 35 | * it fails. Thus need to load it with ANY valid value before invoking |
---|
39 | 36 | * TLBIVUTLB cmd |
---|
40 | 37 | * |
---|
41 | 38 | * Vineetg: Aug 21th 2008: |
---|
42 | 39 | * -Reduced the duration of IRQ lockouts in TLB Flush routines |
---|
43 | | - * -Multiple copies of TLB erase code seperated into a "single" function |
---|
| 40 | + * -Multiple copies of TLB erase code separated into a "single" function |
---|
44 | 41 | * -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID |
---|
45 | 42 | * in interrupt-safe region. |
---|
46 | 43 | * |
---|
.. | .. |
---|
69 | 66 | * |
---|
70 | 67 | * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBS which has |
---|
71 | 68 | * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways. |
---|
72 | | - * Given this, the thrasing problem should never happen because once the 3 |
---|
| 69 | + * Given this, the thrashing problem should never happen because once the 3 |
---|
73 | 70 | * J-TLB entries are created (even though 3rd will knock out one of the prev |
---|
74 | 71 | * two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy |
---|
75 | 72 | * |
---|
.. | .. |
---|
121 | 118 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); |
---|
122 | 119 | } |
---|
123 | 120 | |
---|
| 121 | +static void utlb_invalidate(void) |
---|
| 122 | +{ |
---|
| 123 | +#if (CONFIG_ARC_MMU_VER >= 2) |
---|
| 124 | + |
---|
| 125 | +#if (CONFIG_ARC_MMU_VER == 2) |
---|
| 126 | + /* MMU v2 introduced the uTLB Flush command. |
---|
| 127 | + * There was however an obscure hardware bug, where uTLB flush would |
---|
| 128 | + * fail when a prior probe for J-TLB (both totally unrelated) would |
---|
| 129 | + * return lkup err - because the entry didn't exist in MMU. |
---|
| 130 | + * The Workaround was to set Index reg with some valid value, prior to |
---|
| 131 | + * flush. This was fixed in MMU v3 |
---|
| 132 | + */ |
---|
| 133 | + unsigned int idx; |
---|
| 134 | + |
---|
| 135 | + /* make sure INDEX Reg is valid */ |
---|
| 136 | + idx = read_aux_reg(ARC_REG_TLBINDEX); |
---|
| 137 | + |
---|
| 138 | +	/* If not, write some dummy val */
---|
| 139 | + if (unlikely(idx & TLB_LKUP_ERR)) |
---|
| 140 | + write_aux_reg(ARC_REG_TLBINDEX, 0xa); |
---|
| 141 | +#endif |
---|
| 142 | + |
---|
| 143 | + write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB); |
---|
| 144 | +#endif |
---|
| 145 | + |
---|
| 146 | +} |
---|
| 147 | + |
---|
124 | 148 | #if (CONFIG_ARC_MMU_VER < 4) |
---|
125 | 149 | |
---|
126 | 150 | static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid) |
---|
.. | .. |
---|
150 | 174 | WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n", |
---|
151 | 175 | vaddr_n_asid); |
---|
152 | 176 | } |
---|
153 | | -} |
---|
154 | | - |
---|
155 | | -/**************************************************************************** |
---|
156 | | - * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs) |
---|
157 | | - * |
---|
158 | | - * New IVUTLB cmd in MMU v2 explictly invalidates the uTLB |
---|
159 | | - * |
---|
160 | | - * utlb_invalidate ( ) |
---|
161 | | - * -For v2 MMU calls Flush uTLB Cmd |
---|
162 | | - * -For v1 MMU does nothing (except for Metal Fix v1 MMU) |
---|
163 | | - * This is because in v1 TLBWrite itself invalidate uTLBs |
---|
164 | | - ***************************************************************************/ |
---|
165 | | - |
---|
166 | | -static void utlb_invalidate(void) |
---|
167 | | -{ |
---|
168 | | -#if (CONFIG_ARC_MMU_VER >= 2) |
---|
169 | | - |
---|
170 | | -#if (CONFIG_ARC_MMU_VER == 2) |
---|
171 | | - /* MMU v2 introduced the uTLB Flush command. |
---|
172 | | - * There was however an obscure hardware bug, where uTLB flush would |
---|
173 | | - * fail when a prior probe for J-TLB (both totally unrelated) would |
---|
174 | | - * return lkup err - because the entry didn't exist in MMU. |
---|
175 | | - * The Workround was to set Index reg with some valid value, prior to |
---|
176 | | - * flush. This was fixed in MMU v3 hence not needed any more |
---|
177 | | - */ |
---|
178 | | - unsigned int idx; |
---|
179 | | - |
---|
180 | | - /* make sure INDEX Reg is valid */ |
---|
181 | | - idx = read_aux_reg(ARC_REG_TLBINDEX); |
---|
182 | | - |
---|
183 | | - /* If not write some dummy val */ |
---|
184 | | - if (unlikely(idx & TLB_LKUP_ERR)) |
---|
185 | | - write_aux_reg(ARC_REG_TLBINDEX, 0xa); |
---|
186 | | -#endif |
---|
187 | | - |
---|
188 | | - write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB); |
---|
189 | | -#endif |
---|
190 | | - |
---|
191 | 177 | } |
---|
192 | 178 | |
---|
193 | 179 | static void tlb_entry_insert(unsigned int pd0, pte_t pd1) |
---|
.. | .. |
---|
221 | 207 | } |
---|
222 | 208 | |
---|
223 | 209 | #else /* CONFIG_ARC_MMU_VER >= 4) */ |
---|
224 | | - |
---|
225 | | -static void utlb_invalidate(void) |
---|
226 | | -{ |
---|
227 | | - /* No need since uTLB is always in sync with JTLB */ |
---|
228 | | -} |
---|
229 | 210 | |
---|
230 | 211 | static void tlb_entry_erase(unsigned int vaddr_n_asid) |
---|
231 | 212 | { |
---|
.. | .. |
---|
270 | 251 | for (entry = 0; entry < num_tlb; entry++) { |
---|
271 | 252 | /* write this entry to the TLB */ |
---|
272 | 253 | write_aux_reg(ARC_REG_TLBINDEX, entry); |
---|
273 | | - write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); |
---|
| 254 | + write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI); |
---|
274 | 255 | } |
---|
275 | 256 | |
---|
276 | 257 | if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { |
---|
.. | .. |
---|
281 | 262 | |
---|
282 | 263 | for (entry = stlb_idx; entry < stlb_idx + 16; entry++) { |
---|
283 | 264 | write_aux_reg(ARC_REG_TLBINDEX, entry); |
---|
284 | | - write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); |
---|
| 265 | + write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI); |
---|
285 | 266 | } |
---|
286 | 267 | } |
---|
287 | 268 | |
---|
.. | .. |
---|
291 | 272 | } |
---|
292 | 273 | |
---|
293 | 274 | /* |
---|
294 | | - * Flush the entrie MM for userland. The fastest way is to move to Next ASID |
---|
| 275 | + * Flush the entire MM for userland. The fastest way is to move to Next ASID |
---|
295 | 276 | */ |
---|
296 | 277 | noinline void local_flush_tlb_mm(struct mm_struct *mm) |
---|
297 | 278 | { |
---|
.. | .. |
---|
322 | 303 | * Difference between this and Kernel Range Flush is |
---|
323 | 304 | * -Here the fastest way (if range is too large) is to move to next ASID |
---|
324 | 305 | * without doing any explicit Shootdown |
---|
325 | | - * -In case of kernel Flush, entry has to be shot down explictly |
---|
| 306 | + * -In case of kernel Flush, entry has to be shot down explicitly |
---|
326 | 307 | */ |
---|
327 | 308 | void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, |
---|
328 | 309 | unsigned long end) |
---|
.. | .. |
---|
358 | 339 | } |
---|
359 | 340 | } |
---|
360 | 341 | |
---|
361 | | - utlb_invalidate(); |
---|
362 | | - |
---|
363 | 342 | local_irq_restore(flags); |
---|
364 | 343 | } |
---|
365 | 344 | |
---|
.. | .. |
---|
388 | 367 | start += PAGE_SIZE; |
---|
389 | 368 | } |
---|
390 | 369 | |
---|
391 | | - utlb_invalidate(); |
---|
392 | | - |
---|
393 | 370 | local_irq_restore(flags); |
---|
394 | 371 | } |
---|
395 | 372 | |
---|
.. | .. |
---|
410 | 387 | |
---|
411 | 388 | if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { |
---|
412 | 389 | tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu)); |
---|
413 | | - utlb_invalidate(); |
---|
414 | 390 | } |
---|
415 | 391 | |
---|
416 | 392 | local_irq_restore(flags); |
---|
.. | .. |
---|
600 | 576 | pte_t *ptep) |
---|
601 | 577 | { |
---|
602 | 578 | unsigned long vaddr = vaddr_unaligned & PAGE_MASK; |
---|
603 | | - phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK; |
---|
| 579 | + phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS; |
---|
604 | 580 | struct page *page = pfn_to_page(pte_pfn(*ptep)); |
---|
605 | 581 | |
---|
606 | 582 | create_tlb(vma, vaddr, ptep); |
---|
.. | .. |
---|
644 | 620 | * Super Page size is configurable in hardware (4K to 16M), but fixed once |
---|
645 | 621 | * RTL builds. |
---|
646 | 622 | * |
---|
647 | | - * The exact THP size a Linx configuration will support is a function of: |
---|
| 623 | + * The exact THP size a Linux configuration will support is a function of: |
---|
648 | 624 | * - MMU page size (typical 8K, RTL fixed) |
---|
649 | 625 | * - software page walker address split between PGD:PTE:PFN (typical |
---|
650 | 626 | * 11:8:13, but can be changed with 1 line) |
---|
.. | .. |
---|
722 | 698 | |
---|
723 | 699 | #endif |
---|
724 | 700 | |
---|
725 | | -/* Read the Cache Build Confuration Registers, Decode them and save into |
---|
| 701 | +/* Read the Cache Build Configuration Registers, Decode them and save into |
---|
726 | 702 | * the cpuinfo structure for later use. |
---|
727 | 703 | * No Validation is done here, simply read/convert the BCRs |
---|
728 | 704 | */ |
---|
.. | .. |
---|
827 | 803 | pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str))); |
---|
828 | 804 | |
---|
829 | 805 | /* |
---|
830 | | - * Can't be done in processor.h due to header include depenedencies |
---|
| 806 | + * Can't be done in processor.h due to header include dependencies |
---|
831 | 807 | */ |
---|
832 | 808 | BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE)); |
---|
833 | 809 | |
---|
834 | 810 | /* |
---|
835 | 811 | * stack top size sanity check, |
---|
836 | | - * Can't be done in processor.h due to header include depenedencies |
---|
| 812 | + * Can't be done in processor.h due to header include dependencies |
---|
837 | 813 | */ |
---|
838 | 814 | BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE)); |
---|
839 | 815 | |
---|
.. | .. |
---|
871 | 847 | write_aux_reg(ARC_REG_PID, MMU_ENABLE); |
---|
872 | 848 | |
---|
873 | 849 | /* In smp we use this reg for interrupt 1 scratch */ |
---|
874 | | -#ifndef CONFIG_SMP |
---|
| 850 | +#ifdef ARC_USE_SCRATCH_REG |
---|
875 | 851 | /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */ |
---|
876 | 852 | write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir); |
---|
877 | 853 | #endif |
---|
.. | .. |
---|
905 | 881 | * the duplicate one. |
---|
906 | 882 | * -Knob to be verbose abt it.(TODO: hook them up to debugfs) |
---|
907 | 883 | */ |
---|
908 | | -volatile int dup_pd_silent; /* Be slient abt it or complain (default) */ |
---|
| 884 | +volatile int dup_pd_silent; /* Be silent abt it or complain (default) */ |
---|
909 | 885 | |
---|
910 | 886 | void do_tlb_overlap_fault(unsigned long cause, unsigned long address, |
---|
911 | 887 | struct pt_regs *regs) |
---|
.. | .. |
---|
972 | 948 | |
---|
973 | 949 | /*********************************************************************** |
---|
974 | 950 | * Diagnostic Routines |
---|
975 | | - * -Called from Low Level TLB Hanlders if things don;t look good |
---|
| 951 | + * -Called from Low Level TLB Handlers if things don't look good
---|
976 | 952 | **********************************************************************/ |
---|
977 | 953 | |
---|
978 | 954 | #ifdef CONFIG_ARC_DBG_TLB_PARANOIA |
---|