forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 748e4f3d702def1a4bff191e0cf93b6a05340f01
kernel/arch/arc/mm/tlb.c
....@@ -1,11 +1,8 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
23 * TLB Management (flush/create/diagnostics) for ARC700
34 *
45 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
5
- *
6
- * This program is free software; you can redistribute it and/or modify
7
- * it under the terms of the GNU General Public License version 2 as
8
- * published by the Free Software Foundation.
96 *
107 * vineetg: Aug 2011
118 * -Reintroduce duplicate PD fixup - some customer chips still have the issue
....@@ -33,14 +30,14 @@
3330 * -Changes related to MMU v2 (Rel 4.8)
3431 *
3532 * Vineetg: Aug 29th 2008
36
- * -In TLB Flush operations (Metal Fix MMU) there is a explict command to
33
+ * -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
3734 * flush Micro-TLBS. If TLB Index Reg is invalid prior to TLBIVUTLB cmd,
3835 * it fails. Thus need to load it with ANY valid value before invoking
3936 * TLBIVUTLB cmd
4037 *
4138 * Vineetg: Aug 21th 2008:
4239 * -Reduced the duration of IRQ lockouts in TLB Flush routines
43
- * -Multiple copies of TLB erase code seperated into a "single" function
40
+ * -Multiple copies of TLB erase code separated into a "single" function
4441 * -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
4542 * in interrupt-safe region.
4643 *
....@@ -69,7 +66,7 @@
6966 *
7067 * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBS which has
7168 * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
72
- * Given this, the thrasing problem should never happen because once the 3
69
+ * Given this, the thrashing problem should never happen because once the 3
7370 * J-TLB entries are created (even though 3rd will knock out one of the prev
7471 * two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy
7572 *
....@@ -121,6 +118,33 @@
121118 write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
122119 }
123120
121
+static void utlb_invalidate(void)
122
+{
123
+#if (CONFIG_ARC_MMU_VER >= 2)
124
+
125
+#if (CONFIG_ARC_MMU_VER == 2)
126
+ /* MMU v2 introduced the uTLB Flush command.
127
+ * There was however an obscure hardware bug, where uTLB flush would
128
+ * fail when a prior probe for J-TLB (both totally unrelated) would
129
+ * return lkup err - because the entry didn't exist in MMU.
130
+ * The Workaround was to set Index reg with some valid value, prior to
131
+ * flush. This was fixed in MMU v3
132
+ */
133
+ unsigned int idx;
134
+
135
+ /* make sure INDEX Reg is valid */
136
+ idx = read_aux_reg(ARC_REG_TLBINDEX);
137
+
138
+ /* If not write some dummy val */
139
+ if (unlikely(idx & TLB_LKUP_ERR))
140
+ write_aux_reg(ARC_REG_TLBINDEX, 0xa);
141
+#endif
142
+
143
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
144
+#endif
145
+
146
+}
147
+
124148 #if (CONFIG_ARC_MMU_VER < 4)
125149
126150 static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
....@@ -150,44 +174,6 @@
150174 WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
151175 vaddr_n_asid);
152176 }
153
-}
154
-
155
-/****************************************************************************
156
- * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
157
- *
158
- * New IVUTLB cmd in MMU v2 explictly invalidates the uTLB
159
- *
160
- * utlb_invalidate ( )
161
- * -For v2 MMU calls Flush uTLB Cmd
162
- * -For v1 MMU does nothing (except for Metal Fix v1 MMU)
163
- * This is because in v1 TLBWrite itself invalidate uTLBs
164
- ***************************************************************************/
165
-
166
-static void utlb_invalidate(void)
167
-{
168
-#if (CONFIG_ARC_MMU_VER >= 2)
169
-
170
-#if (CONFIG_ARC_MMU_VER == 2)
171
- /* MMU v2 introduced the uTLB Flush command.
172
- * There was however an obscure hardware bug, where uTLB flush would
173
- * fail when a prior probe for J-TLB (both totally unrelated) would
174
- * return lkup err - because the entry didn't exist in MMU.
175
- * The Workround was to set Index reg with some valid value, prior to
176
- * flush. This was fixed in MMU v3 hence not needed any more
177
- */
178
- unsigned int idx;
179
-
180
- /* make sure INDEX Reg is valid */
181
- idx = read_aux_reg(ARC_REG_TLBINDEX);
182
-
183
- /* If not write some dummy val */
184
- if (unlikely(idx & TLB_LKUP_ERR))
185
- write_aux_reg(ARC_REG_TLBINDEX, 0xa);
186
-#endif
187
-
188
- write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
189
-#endif
190
-
191177 }
192178
193179 static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
....@@ -221,11 +207,6 @@
221207 }
222208
223209 #else /* CONFIG_ARC_MMU_VER >= 4) */
224
-
225
-static void utlb_invalidate(void)
226
-{
227
- /* No need since uTLB is always in sync with JTLB */
228
-}
229210
230211 static void tlb_entry_erase(unsigned int vaddr_n_asid)
231212 {
....@@ -270,7 +251,7 @@
270251 for (entry = 0; entry < num_tlb; entry++) {
271252 /* write this entry to the TLB */
272253 write_aux_reg(ARC_REG_TLBINDEX, entry);
273
- write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
254
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
274255 }
275256
276257 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
....@@ -281,7 +262,7 @@
281262
282263 for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
283264 write_aux_reg(ARC_REG_TLBINDEX, entry);
284
- write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
265
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
285266 }
286267 }
287268
....@@ -291,7 +272,7 @@
291272 }
292273
293274 /*
294
- * Flush the entrie MM for userland. The fastest way is to move to Next ASID
275
+ * Flush the entire MM for userland. The fastest way is to move to Next ASID
295276 */
296277 noinline void local_flush_tlb_mm(struct mm_struct *mm)
297278 {
....@@ -322,7 +303,7 @@
322303 * Difference between this and Kernel Range Flush is
323304 * -Here the fastest way (if range is too large) is to move to next ASID
324305 * without doing any explicit Shootdown
325
- * -In case of kernel Flush, entry has to be shot down explictly
306
+ * -In case of kernel Flush, entry has to be shot down explicitly
326307 */
327308 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
328309 unsigned long end)
....@@ -358,8 +339,6 @@
358339 }
359340 }
360341
361
- utlb_invalidate();
362
-
363342 local_irq_restore(flags);
364343 }
365344
....@@ -388,8 +367,6 @@
388367 start += PAGE_SIZE;
389368 }
390369
391
- utlb_invalidate();
392
-
393370 local_irq_restore(flags);
394371 }
395372
....@@ -410,7 +387,6 @@
410387
411388 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
412389 tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
413
- utlb_invalidate();
414390 }
415391
416392 local_irq_restore(flags);
....@@ -600,7 +576,7 @@
600576 pte_t *ptep)
601577 {
602578 unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
603
- phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
579
+ phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
604580 struct page *page = pfn_to_page(pte_pfn(*ptep));
605581
606582 create_tlb(vma, vaddr, ptep);
....@@ -644,7 +620,7 @@
644620 * Super Page size is configurable in hardware (4K to 16M), but fixed once
645621 * RTL builds.
646622 *
647
- * The exact THP size a Linx configuration will support is a function of:
623
+ * The exact THP size a Linux configuration will support is a function of:
648624 * - MMU page size (typical 8K, RTL fixed)
649625 * - software page walker address split between PGD:PTE:PFN (typical
650626 * 11:8:13, but can be changed with 1 line)
....@@ -722,7 +698,7 @@
722698
723699 #endif
724700
725
-/* Read the Cache Build Confuration Registers, Decode them and save into
701
+/* Read the Cache Build Configuration Registers, Decode them and save into
726702 * the cpuinfo structure for later use.
727703 * No Validation is done here, simply read/convert the BCRs
728704 */
....@@ -827,13 +803,13 @@
827803 pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
828804
829805 /*
830
- * Can't be done in processor.h due to header include depenedencies
806
+ * Can't be done in processor.h due to header include dependencies
831807 */
832808 BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));
833809
834810 /*
835811 * stack top size sanity check,
836
- * Can't be done in processor.h due to header include depenedencies
812
+ * Can't be done in processor.h due to header include dependencies
837813 */
838814 BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
839815
....@@ -871,7 +847,7 @@
871847 write_aux_reg(ARC_REG_PID, MMU_ENABLE);
872848
873849 /* In smp we use this reg for interrupt 1 scratch */
874
-#ifndef CONFIG_SMP
850
+#ifdef ARC_USE_SCRATCH_REG
875851 /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
876852 write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
877853 #endif
....@@ -905,7 +881,7 @@
905881 * the duplicate one.
906882 * -Knob to be verbose abt it.(TODO: hook them up to debugfs)
907883 */
908
-volatile int dup_pd_silent; /* Be slient abt it or complain (default) */
884
+volatile int dup_pd_silent; /* Be silent abt it or complain (default) */
909885
910886 void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
911887 struct pt_regs *regs)
....@@ -972,7 +948,7 @@
972948
973949 /***********************************************************************
974950 * Diagnostic Routines
975
- * -Called from Low Level TLB Hanlders if things don;t look good
951
+ * -Called from Low Level TLB Handlers if things don't look good
976952 **********************************************************************/
977953
978954 #ifdef CONFIG_ARC_DBG_TLB_PARANOIA