forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
diff --git a/kernel/arch/parisc/kernel/cache.c b/kernel/arch/parisc/kernel/cache.c
--- a/kernel/arch/parisc/kernel/cache.c
+++ b/kernel/arch/parisc/kernel/cache.c
@@ -24,31 +24,38 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
-#include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
 #include <asm/shmparam.h>
 
-int split_tlb __read_mostly;
-int dcache_stride __read_mostly;
-int icache_stride __read_mostly;
+int split_tlb __ro_after_init;
+int dcache_stride __ro_after_init;
+int icache_stride __ro_after_init;
 EXPORT_SYMBOL(dcache_stride);
 
 void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 EXPORT_SYMBOL(flush_dcache_page_asm);
+void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 
 
-/* On some machines (e.g. ones with the Merced bus), there can be
+/* On some machines (i.e., ones with the Merced bus), there can be
  * only a single PxTLB broadcast at a time; this must be guaranteed
- * by software. We put a spinlock around all TLB flushes to
- * ensure this.
+ * by software. We need a spinlock around all TLB flushes to ensure
+ * this.
  */
-DEFINE_SPINLOCK(pa_tlb_lock);
+DEFINE_SPINLOCK(pa_tlb_flush_lock);
 
-struct pdc_cache_info cache_info __read_mostly;
+/* Swapper page setup lock. */
+DEFINE_SPINLOCK(pa_swapper_pg_lock);
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+int pa_serialize_tlb_flushes __ro_after_init;
+#endif
+
+struct pdc_cache_info cache_info __ro_after_init;
 #ifndef CONFIG_PA20
-static struct pdc_btlb_info btlb_info __read_mostly;
+static struct pdc_btlb_info btlb_info __ro_after_init;
 #endif
 
 #ifdef CONFIG_SMP
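
Note on the annotation swap in the hunk above: __read_mostly only groups variables into a shared read-mostly cache line, while __ro_after_init additionally write-protects them once boot finishes, so a stray post-init write faults instead of silently corrupting cache/TLB parameters. A minimal usage sketch with illustrative names that are not taken from this patch:

        #include <linux/cache.h>        /* __ro_after_init */
        #include <linux/init.h>

        /* Tuned once while booting, then effectively constant. */
        static unsigned long example_stride __ro_after_init;

        static int __init example_stride_setup(void)
        {
                example_stride = 64;    /* writable here; read-only after init completes */
                return 0;
        }
        arch_initcall(example_stride_setup);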
@@ -76,9 +83,9 @@
 #define pfn_va(pfn)     __va(PFN_PHYS(pfn))
 
 void
-update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+__update_cache(pte_t pte)
 {
-        unsigned long pfn = pte_pfn(*ptep);
+        unsigned long pfn = pte_pfn(pte);
         struct page *page;
 
         /* We don't have pte special. As a result, we can be called with
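
The hunk above narrows the old update_mmu_cache() hook into __update_cache(pte_t pte), which only needs the PTE value. The architecture hook itself presumably becomes a thin forwarder in a header rather than in this file; a hedged sketch of what that wrapper would look like:

        /* Sketch only: forward the generic MM hook to the new helper
         * (the real definition would live in asm/pgtable.h). */
        #define update_mmu_cache(vma, addr, ptep) __update_cache(*ptep)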
@@ -303,6 +310,17 @@
         preempt_enable();
 }
 
+static inline void
+__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+                   unsigned long physaddr)
+{
+        preempt_disable();
+        purge_dcache_page_asm(physaddr, vmaddr);
+        if (vma->vm_flags & VM_EXEC)
+                flush_icache_page_asm(physaddr, vmaddr);
+        preempt_enable();
+}
+
 void flush_dcache_page(struct page *page)
 {
         struct address_space *mapping = page_mapping_file(page);
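
__purge_cache_page() mirrors the existing __flush_cache_page() helper in this file; the intended difference is only the data-cache primitive: flush_dcache_page_asm() writes dirty lines back before invalidating (PA-RISC fdc), whereas purge_dcache_page_asm() invalidates without write-back (pdc). For contrast, the pre-existing flush variant looks roughly like this (reproduced from memory as a hedged sketch, not part of the hunk):

        static inline void
        __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                           unsigned long physaddr)
        {
                preempt_disable();
                flush_dcache_page_asm(physaddr, vmaddr);  /* write back + invalidate */
                if (vma->vm_flags & VM_EXEC)
                        flush_icache_page_asm(physaddr, vmaddr);
                preempt_enable();
        }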
@@ -346,7 +364,7 @@
                 if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
                                       != (addr & (SHM_COLOUR - 1))) {
                         __flush_cache_page(mpnt, addr, page_to_phys(page));
-                        if (old_addr)
+                        if (parisc_requires_coherency() && old_addr)
                                 printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
                         old_addr = addr;
                 }
@@ -362,15 +380,15 @@
 EXPORT_SYMBOL(flush_kernel_icache_range_asm);
 
 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
-static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
+static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;
 
-#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
-static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
+#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
+static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;
 
 void __init parisc_setup_cache_timing(void)
 {
         unsigned long rangetime, alltime;
-        unsigned long size, start;
+        unsigned long size;
         unsigned long threshold;
 
         alltime = mfctl(16);
@@ -404,28 +422,28 @@
                 goto set_tlb_threshold;
         }
 
+        size = (unsigned long)_end - (unsigned long)_text;
+        rangetime = mfctl(16);
+        flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
+        rangetime = mfctl(16) - rangetime;
+
         alltime = mfctl(16);
         flush_tlb_all();
         alltime = mfctl(16) - alltime;
 
-        size = 0;
-        start = (unsigned long) _text;
-        rangetime = mfctl(16);
-        while (start < (unsigned long) _end) {
-                flush_tlb_kernel_range(start, start + PAGE_SIZE);
-                start += PAGE_SIZE;
-                size += PAGE_SIZE;
-        }
-        rangetime = mfctl(16) - rangetime;
-
-        printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
+        printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
                 alltime, size, rangetime);
 
-        threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
+        threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
+        printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
+                threshold/1024);
 
 set_tlb_threshold:
-        if (threshold)
+        if (threshold > FLUSH_TLB_THRESHOLD)
                 parisc_tlb_flush_threshold = threshold;
+        else
+                parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
+
         printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
                 parisc_tlb_flush_threshold/1024);
 }
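
The calibration above now times a single flush_tlb_kernel_range() over the whole kernel image (_text.._end) against flush_tlb_all(), instead of flushing the image one page at a time, and the result is clamped so the threshold never drops below FLUSH_TLB_THRESHOLD (16 KiB). A standalone sketch of how the threshold formula behaves; all numbers below are made up for illustration, not measurements:

        #include <stdio.h>

        int main(void)
        {
                unsigned long ncpus      = 4;
                unsigned long size       = 8UL << 20;  /* bytes in _text.._end    */
                unsigned long alltime    = 120000;     /* cycles, flush_tlb_all() */
                unsigned long rangetime  = 480000;     /* cycles, range flush     */
                unsigned long page_size  = 4096;
                unsigned long min_thresh = 16 * 1024;  /* FLUSH_TLB_THRESHOLD     */

                unsigned long threshold = (ncpus * size * alltime) / rangetime;
                /* PAGE_ALIGN() equivalent */
                threshold = (threshold + page_size - 1) & ~(page_size - 1);
                if (threshold < min_thresh)
                        threshold = min_thresh;

                printf("TLB flush threshold: %lu KiB\n", threshold / 1024);
                return 0;
        }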
@@ -477,18 +495,6 @@
         /* Purge TLB entries for small ranges using the pdtlb and
            pitlb instructions.  These instructions execute locally
            but cause a purge request to be broadcast to other TLBs.  */
-        if (likely(!split_tlb)) {
-                while (start < end) {
-                        purge_tlb_start(flags);
-                        mtsp(sid, 1);
-                        pdtlb(start);
-                        purge_tlb_end(flags);
-                        start += PAGE_SIZE;
-                }
-                return 0;
-        }
-
-        /* split TLB case */
 while (start < end) {
                 purge_tlb_start(flags);
                 mtsp(sid, 1);
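
With the !split_tlb fast path removed, unified- and split-TLB machines now share the one remaining loop, which issues the data- and instruction-TLB purges together. Reconstructing the unchanged tail of that loop from the surrounding context (a hedged sketch, not additional patch content):

        while (start < end) {
                purge_tlb_start(flags);
                mtsp(sid, 1);
                pdtlb(start);           /* purge data TLB entry        */
                pitlb(start);           /* purge instruction TLB entry */
                purge_tlb_end(flags);
                start += PAGE_SIZE;
        }
        return 0;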
@@ -525,11 +531,14 @@
         pte_t *ptep = NULL;
 
         if (!pgd_none(*pgd)) {
-                pud_t *pud = pud_offset(pgd, addr);
-                if (!pud_none(*pud)) {
-                        pmd_t *pmd = pmd_offset(pud, addr);
-                        if (!pmd_none(*pmd))
-                                ptep = pte_offset_map(pmd, addr);
+                p4d_t *p4d = p4d_offset(pgd, addr);
+                if (!p4d_none(*p4d)) {
+                        pud_t *pud = pud_offset(p4d, addr);
+                        if (!pud_none(*pud)) {
+                                pmd_t *pmd = pmd_offset(pud, addr);
+                                if (!pmd_none(*pmd))
+                                        ptep = pte_offset_map(pmd, addr);
+                        }
                 }
         }
         return ptep;
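
The extra level in get_ptep() follows the kernel-wide move to five-level page tables: pgd_offset() is now followed by p4d_offset() before pud_offset(). On architectures with fewer hardware levels the folded helpers compile down to pass-throughs, so the deeper walk costs nothing at runtime. An illustrative standalone walk using the same generic helpers (the wrapper name is mine, not from the patch):

        /* Sketch: look up the PTE for addr in mm, or NULL if unmapped. */
        static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
        {
                pgd_t *pgd = pgd_offset(mm, addr);
                p4d_t *p4d;
                pud_t *pud;
                pmd_t *pmd;

                if (pgd_none(*pgd))
                        return NULL;
                p4d = p4d_offset(pgd, addr);
                if (p4d_none(*p4d))
                        return NULL;
                pud = pud_offset(p4d, addr);
                if (pud_none(*pud))
                        return NULL;
                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd))
                        return NULL;
                return pte_offset_map(pmd, addr);
        }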
@@ -573,9 +582,12 @@
                         pfn = pte_pfn(*ptep);
                         if (!pfn_valid(pfn))
                                 continue;
-                        if (unlikely(mm->context))
+                        if (unlikely(mm->context)) {
                                 flush_tlb_page(vma, addr);
-                        __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+                                __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+                        } else {
+                                __purge_cache_page(vma, addr, PFN_PHYS(pfn));
+                        }
                 }
         }
 }
@@ -610,9 +622,12 @@
                         continue;
                 pfn = pte_pfn(*ptep);
                 if (pfn_valid(pfn)) {
-                        if (unlikely(vma->vm_mm->context))
+                        if (unlikely(vma->vm_mm->context)) {
                                 flush_tlb_page(vma, addr);
-                        __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+                                __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+                        } else {
+                                __purge_cache_page(vma, addr, PFN_PHYS(pfn));
+                        }
                 }
         }
 }
@@ -621,9 +636,12 @@
 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
         if (pfn_valid(pfn)) {
-                if (likely(vma->vm_mm->context))
+                if (likely(vma->vm_mm->context)) {
                         flush_tlb_page(vma, vmaddr);
-                __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+                        __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+                } else {
+                        __purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+                }
         }
 }
 
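
The last three hunks apply one pattern: if the mm still has a context (a space ID), flush the TLB entry and use the write-back flush; otherwise fall back to the new purge helper, presumably because an mm without a context has no live translations whose dirty lines need writing back. An illustrative consolidation of that repeated pattern (a helper of my own naming, not something the patch adds):

        static void flush_or_purge_user_page(struct vm_area_struct *vma,
                                             unsigned long vmaddr, unsigned long pfn)
        {
                if (!pfn_valid(pfn))
                        return;
                if (vma->vm_mm->context) {
                        flush_tlb_page(vma, vmaddr);
                        __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
                } else {
                        __purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
                }
        }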