forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/arch/parisc/kernel/cache.c
@@ -24,31 +24,38 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
-#include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
 #include <asm/shmparam.h>

-int split_tlb __read_mostly;
-int dcache_stride __read_mostly;
-int icache_stride __read_mostly;
+int split_tlb __ro_after_init;
+int dcache_stride __ro_after_init;
+int icache_stride __ro_after_init;
 EXPORT_SYMBOL(dcache_stride);

 void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 EXPORT_SYMBOL(flush_dcache_page_asm);
+void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


-/* On some machines (e.g. ones with the Merced bus), there can be
+/* On some machines (i.e., ones with the Merced bus), there can be
  * only a single PxTLB broadcast at a time; this must be guaranteed
- * by software. We put a spinlock around all TLB flushes to
- * ensure this.
+ * by software. We need a spinlock around all TLB flushes to ensure
+ * this.
  */
-DEFINE_SPINLOCK(pa_tlb_lock);
+DEFINE_SPINLOCK(pa_tlb_flush_lock);

-struct pdc_cache_info cache_info __read_mostly;
+/* Swapper page setup lock. */
+DEFINE_SPINLOCK(pa_swapper_pg_lock);
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+int pa_serialize_tlb_flushes __ro_after_init;
+#endif
+
+struct pdc_cache_info cache_info __ro_after_init;
 #ifndef CONFIG_PA20
-static struct pdc_btlb_info btlb_info __read_mostly;
+static struct pdc_btlb_info btlb_info __ro_after_init;
 #endif

 #ifdef CONFIG_SMP
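
Note: switching these variables from __read_mostly to __ro_after_init moves them into a section the kernel write-protects once boot completes; they are written exactly once during cache/TLB probing, so this is the tighter annotation. The new pa_serialize_tlb_flushes flag lets 64-bit SMP machines that tolerate concurrent PxTLB broadcasts skip the global lock entirely. A minimal sketch of how the flag is presumably consumed by the purge_tlb_start()/purge_tlb_end() helpers (these live in asm/tlbflush.h, not in this diff; treat the exact form as an assumption):

	/* Sketch only: take the global lock when broadcasts must be serialized. */
	#define purge_tlb_start(flags)	do {				\
		if (pa_serialize_tlb_flushes)				\
			spin_lock_irqsave(&pa_tlb_flush_lock, flags);	\
		else							\
			local_irq_save(flags);				\
		} while (0)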
@@ -76,9 +83,9 @@
 #define pfn_va(pfn) __va(PFN_PHYS(pfn))

 void
-update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+__update_cache(pte_t pte)
 {
-	unsigned long pfn = pte_pfn(*ptep);
+	unsigned long pfn = pte_pfn(pte);
 	struct page *page;

 	/* We don't have pte special. As a result, we can be called with
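
Note: the MMU-cache hook now takes the PTE by value rather than a pointer into the page table, so callers that already hold a pte_t no longer need a live ptep. The generic update_mmu_cache() entry point presumably becomes a thin wrapper in asm/pgtable.h, along these lines (assumed, not shown in this diff):

	#define update_mmu_cache(vma, addr, ptep)	__update_cache(*(ptep))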
@@ -303,12 +310,24 @@
 	preempt_enable();
 }

+static inline void
+__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+		   unsigned long physaddr)
+{
+	preempt_disable();
+	purge_dcache_page_asm(physaddr, vmaddr);
+	if (vma->vm_flags & VM_EXEC)
+		flush_icache_page_asm(physaddr, vmaddr);
+	preempt_enable();
+}
+
 void flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping_file(page);
 	struct vm_area_struct *mpnt;
 	unsigned long offset;
 	unsigned long addr, old_addr = 0;
+	unsigned long flags;
 	pgoff_t pgoff;

 	if (mapping && !mapping_mapped(mapping)) {
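
Note: PA-RISC distinguishes fdc (flush: write dirty lines back, then invalidate) from pdc (purge: invalidate without write-back). The new __purge_cache_page() is the pdc twin of the existing __flush_cache_page(), whose tail is visible as context at the top of this hunk and which presumably reads:

	static inline void
	__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
			   unsigned long physaddr)
	{
		preempt_disable();
		flush_dcache_page_asm(physaddr, vmaddr);
		if (vma->vm_flags & VM_EXEC)
			flush_icache_page_asm(physaddr, vmaddr);
		preempt_enable();
	}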
@@ -328,7 +347,7 @@
 	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
 	 * to flush one address here for them all to become coherent */

-	flush_dcache_mmap_lock(mapping);
+	flush_dcache_mmap_lock_irqsave(mapping, flags);
 	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		addr = mpnt->vm_start + offset;
@@ -346,12 +365,12 @@
 		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
				      != (addr & (SHM_COLOUR - 1))) {
 			__flush_cache_page(mpnt, addr, page_to_phys(page));
-			if (old_addr)
+			if (parisc_requires_coherency() && old_addr)
 				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
 			old_addr = addr;
 		}
 	}
-	flush_dcache_mmap_unlock(mapping);
+	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
 }
 EXPORT_SYMBOL(flush_dcache_page);

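Note: flush_dcache_page() now walks the i_mmap tree with interrupts disabled via the _irqsave/_irqrestore lock variants, keeping the walk safe when the mapping lock can also be taken from interrupt context. On parisc these macros presumably wrap the address_space's i_pages xarray lock (assumed definitions, not part of this diff):

	#define flush_dcache_mmap_lock_irqsave(mapping, flags)		\
		xa_lock_irqsave(&(mapping)->i_pages, flags)
	#define flush_dcache_mmap_unlock_irqrestore(mapping, flags)	\
		xa_unlock_irqrestore(&(mapping)->i_pages, flags)

The parisc_requires_coherency() guard limits the INEQUIVALENT ALIASES warning to machines where such aliases are actually harmful.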
@@ -362,15 +381,15 @@
 EXPORT_SYMBOL(flush_kernel_icache_range_asm);

 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
-static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
+static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

-#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
-static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
+#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
+static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;

 void __init parisc_setup_cache_timing(void)
 {
 	unsigned long rangetime, alltime;
-	unsigned long size, start;
+	unsigned long size;
 	unsigned long threshold;

 	alltime = mfctl(16);
@@ -404,28 +423,28 @@
 		goto set_tlb_threshold;
 	}

+	size = (unsigned long)_end - (unsigned long)_text;
+	rangetime = mfctl(16);
+	flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
+	rangetime = mfctl(16) - rangetime;
+
 	alltime = mfctl(16);
 	flush_tlb_all();
 	alltime = mfctl(16) - alltime;

-	size = 0;
-	start = (unsigned long) _text;
-	rangetime = mfctl(16);
-	while (start < (unsigned long) _end) {
-		flush_tlb_kernel_range(start, start + PAGE_SIZE);
-		start += PAGE_SIZE;
-		size += PAGE_SIZE;
-	}
-	rangetime = mfctl(16) - rangetime;
-
-	printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
+	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
 		alltime, size, rangetime);

-	threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
+	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
+	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
+		threshold/1024);

 set_tlb_threshold:
-	if (threshold)
+	if (threshold > FLUSH_TLB_THRESHOLD)
 		parisc_tlb_flush_threshold = threshold;
+	else
+		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
+
 	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
 		parisc_tlb_flush_threshold/1024);
 }
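
Note: the rewritten calibration times one contiguous flush_tlb_kernel_range() over the whole kernel image (_text to _end) instead of the old page-at-a-time loop, then scales the break-even point by CPU count: threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime), clamped to at least FLUSH_TLB_THRESHOLD (16 KiB). Because parisc_tlb_flush_threshold now defaults to ~0UL, range flushes never escalate to flush_tlb_all() before calibration has run. A sketch of how the threshold is presumably consumed by the range-flush path later in this file (assumed form):

	/* Sketch: a full TLB flush is cheaper beyond the calibrated size. */
	if (end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}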
@@ -477,18 +496,6 @@
 	/* Purge TLB entries for small ranges using the pdtlb and
 	   pitlb instructions.  These instructions execute locally
 	   but cause a purge request to be broadcast to other TLBs.  */
-	if (likely(!split_tlb)) {
-		while (start < end) {
-			purge_tlb_start(flags);
-			mtsp(sid, 1);
-			pdtlb(start);
-			purge_tlb_end(flags);
-			start += PAGE_SIZE;
-		}
-		return 0;
-	}
-
-	/* split TLB case */
 	while (start < end) {
 		purge_tlb_start(flags);
 		mtsp(sid, 1);
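
Note: this drops the fast path for machines with a unified TLB, so all machines now take the single loop that purges both the data and instruction TLBs per page. Based on the trailing context above and the comment naming both instructions, the surviving loop presumably continues as:

	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, 1);
		pdtlb(start);	/* purge data TLB entry */
		pitlb(start);	/* purge instruction TLB entry */
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}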
@@ -525,11 +532,14 @@
 	pte_t *ptep = NULL;

 	if (!pgd_none(*pgd)) {
-		pud_t *pud = pud_offset(pgd, addr);
-		if (!pud_none(*pud)) {
-			pmd_t *pmd = pmd_offset(pud, addr);
-			if (!pmd_none(*pmd))
-				ptep = pte_offset_map(pmd, addr);
+		p4d_t *p4d = p4d_offset(pgd, addr);
+		if (!p4d_none(*p4d)) {
+			pud_t *pud = pud_offset(p4d, addr);
+			if (!pud_none(*pud)) {
+				pmd_t *pmd = pmd_offset(pud, addr);
+				if (!pmd_none(*pmd))
+					ptep = pte_offset_map(pmd, addr);
+			}
 		}
 	}
 	return ptep;
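
Note: this converts the get_ptep() walk to the generic five-level page-table API by inserting the p4d step between pgd and pud. parisc has no real p4d level, so the new calls presumably fold away via asm-generic/pgtable-nop4d.h, roughly:

	/* Illustrative folding: the p4d is just the pgd entry in disguise. */
	static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
	{
		return (p4d_t *)pgd;
	}
	#define p4d_none(p4d)	0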
@@ -573,9 +583,12 @@
 		pfn = pte_pfn(*ptep);
 		if (!pfn_valid(pfn))
 			continue;
-		if (unlikely(mm->context))
+		if (unlikely(mm->context)) {
 			flush_tlb_page(vma, addr);
-		__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+		} else {
+			__purge_cache_page(vma, addr, PFN_PHYS(pfn));
+		}
 		}
 	}
 }
@@ -610,9 +623,12 @@
 			continue;
 		pfn = pte_pfn(*ptep);
 		if (pfn_valid(pfn)) {
-			if (unlikely(vma->vm_mm->context))
+			if (unlikely(vma->vm_mm->context)) {
 				flush_tlb_page(vma, addr);
-			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+			} else {
+				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
+			}
 		}
 	}
 }
@@ -621,9 +637,12 @@
 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
 	if (pfn_valid(pfn)) {
-		if (likely(vma->vm_mm->context))
+		if (likely(vma->vm_mm->context)) {
 			flush_tlb_page(vma, vmaddr);
-		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+		} else {
+			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+		}
 	}
 }

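Note: the last three hunks apply one pattern. With a live address-space context (a space ID), the per-page TLB entry is flushed and the page is flushed with write-back (fdc); without one, flush_tlb_page() presumably has no TLB entry to target, and the page is purged instead (pdc, invalidate only). A hypothetical condensation of the repeated branch, for reading convenience only (this helper is not in the diff):

	static void flush_or_purge_page(struct vm_area_struct *vma,
					unsigned long addr, unsigned long pfn)
	{
		if (vma->vm_mm->context) {
			flush_tlb_page(vma, addr);
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));	/* fdc */
		} else {
			__purge_cache_page(vma, addr, PFN_PHYS(pfn));	/* pdc */
		}
	}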