forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-09-20 cf4ce59b3b70238352c7f1729f0f7223214828ad
kernel/arch/mips/mm/c-r4k.c
....@@ -29,7 +29,6 @@
2929 #include <asm/cpu-type.h>
3030 #include <asm/io.h>
3131 #include <asm/page.h>
32
-#include <asm/pgtable.h>
3332 #include <asm/r4kcache.h>
3433 #include <asm/sections.h>
3534 #include <asm/mmu_context.h>
....@@ -131,9 +130,10 @@
131130
132131 #define R4600_HIT_CACHEOP_WAR_IMPL \
133132 do { \
134
- if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) \
133
+ if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && \
134
+ cpu_is_r4600_v2_x()) \
135135 *(volatile unsigned long *)CKSEG1; \
136
- if (R4600_V1_HIT_CACHEOP_WAR) \
136
+ if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP)) \
137137 __asm__ __volatile__("nop;nop;nop;nop"); \
138138 } while (0)
139139
....@@ -239,7 +239,7 @@
239239 r4k_blast_dcache = blast_dcache128;
240240 }
241241
242
-/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
242
+/* force code alignment (used for CONFIG_WAR_TX49XX_ICACHE_INDEX_INV) */
243243 #define JUMP_TO_ALIGN(order) \
244244 __asm__ __volatile__( \
245245 "b\t1f\n\t" \
....@@ -271,12 +271,14 @@
271271 /* I'm in even chunk. blast odd chunks */
272272 for (ws = 0; ws < ws_end; ws += ws_inc)
273273 for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
274
- cache32_unroll32(addr|ws, Index_Invalidate_I);
274
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
275
+ addr | ws, 32);
275276 CACHE32_UNROLL32_ALIGN;
276277 /* I'm in odd chunk. blast even chunks */
277278 for (ws = 0; ws < ws_end; ws += ws_inc)
278279 for (addr = start; addr < end; addr += 0x400 * 2)
279
- cache32_unroll32(addr|ws, Index_Invalidate_I);
280
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
281
+ addr | ws, 32);
280282 }
281283
282284 static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
....@@ -302,12 +304,14 @@
302304 /* I'm in even chunk. blast odd chunks */
303305 for (ws = 0; ws < ws_end; ws += ws_inc)
304306 for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
305
- cache32_unroll32(addr|ws, Index_Invalidate_I);
307
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
308
+ addr | ws, 32);
306309 CACHE32_UNROLL32_ALIGN;
307310 /* I'm in odd chunk. blast even chunks */
308311 for (ws = 0; ws < ws_end; ws += ws_inc)
309312 for (addr = start; addr < end; addr += 0x400 * 2)
310
- cache32_unroll32(addr|ws, Index_Invalidate_I);
313
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
314
+ addr | ws, 32);
311315 }
312316
313317 static void (* r4k_blast_icache_page)(unsigned long addr);
....@@ -320,7 +324,7 @@
320324 r4k_blast_icache_page = (void *)cache_noop;
321325 else if (ic_lsize == 16)
322326 r4k_blast_icache_page = blast_icache16_page;
323
- else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
327
+ else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2EF)
324328 r4k_blast_icache_page = loongson2_blast_icache32_page;
325329 else if (ic_lsize == 32)
326330 r4k_blast_icache_page = blast_icache32_page;
....@@ -363,13 +367,14 @@
363367 else if (ic_lsize == 16)
364368 r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
365369 else if (ic_lsize == 32) {
366
- if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
370
+ if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) &&
371
+ cpu_is_r4600_v1_x())
367372 r4k_blast_icache_page_indexed =
368373 blast_icache32_r4600_v1_page_indexed;
369
- else if (TX49XX_ICACHE_INDEX_INV_WAR)
374
+ else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV))
370375 r4k_blast_icache_page_indexed =
371376 tx49_blast_icache32_page_indexed;
372
- else if (current_cpu_type() == CPU_LOONGSON2)
377
+ else if (current_cpu_type() == CPU_LOONGSON2EF)
373378 r4k_blast_icache_page_indexed =
374379 loongson2_blast_icache32_page_indexed;
375380 else
....@@ -391,11 +396,12 @@
391396 else if (ic_lsize == 16)
392397 r4k_blast_icache = blast_icache16;
393398 else if (ic_lsize == 32) {
394
- if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
399
+ if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) &&
400
+ cpu_is_r4600_v1_x())
395401 r4k_blast_icache = blast_r4600_v1_icache32;
396
- else if (TX49XX_ICACHE_INDEX_INV_WAR)
402
+ else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV))
397403 r4k_blast_icache = tx49_blast_icache32;
398
- else if (current_cpu_type() == CPU_LOONGSON2)
404
+ else if (current_cpu_type() == CPU_LOONGSON2EF)
399405 r4k_blast_icache = loongson2_blast_icache32;
400406 else
401407 r4k_blast_icache = blast_icache32;
....@@ -465,7 +471,7 @@
465471 {
466472 unsigned long sc_lsize = cpu_scache_line_size();
467473
468
- if (current_cpu_type() != CPU_LOONGSON3)
474
+ if (current_cpu_type() != CPU_LOONGSON64)
469475 r4k_blast_scache_node = (void *)cache_noop;
470476 else if (sc_lsize == 16)
471477 r4k_blast_scache_node = blast_scache16_node;
....@@ -480,7 +486,7 @@
480486 static inline void local_r4k___flush_cache_all(void * args)
481487 {
482488 switch (current_cpu_type()) {
483
- case CPU_LOONGSON2:
489
+ case CPU_LOONGSON2EF:
484490 case CPU_R4000SC:
485491 case CPU_R4000MC:
486492 case CPU_R4400SC:
....@@ -497,7 +503,7 @@
497503 r4k_blast_scache();
498504 break;
499505
500
- case CPU_LOONGSON3:
506
+ case CPU_LOONGSON64:
501507 /* Use get_ebase_cpunum() for both NUMA=y/n */
502508 r4k_blast_scache_node(get_ebase_cpunum() >> 2);
503509 break;
....@@ -539,6 +545,9 @@
539545 {
540546 unsigned int i;
541547 const cpumask_t *mask = cpu_present_mask;
548
+
549
+ if (cpu_has_mmid)
550
+ return cpu_context(0, mm) != 0;
542551
543552 /* cpu_sibling_map[] undeclared when !CONFIG_SMP */
544553 #ifdef CONFIG_SMP
....@@ -646,8 +655,6 @@
646655 int exec = vma->vm_flags & VM_EXEC;
647656 struct mm_struct *mm = vma->vm_mm;
648657 int map_coherent = 0;
649
- pgd_t *pgdp;
650
- pud_t *pudp;
651658 pmd_t *pmdp;
652659 pte_t *ptep;
653660 void *vaddr;
....@@ -660,10 +667,8 @@
660667 return;
661668
662669 addr &= PAGE_MASK;
663
- pgdp = pgd_offset(mm, addr);
664
- pudp = pud_offset(pgdp, addr);
665
- pmdp = pmd_offset(pudp, addr);
666
- ptep = pte_offset(pmdp, addr);
670
+ pmdp = pmd_off(mm, addr);
671
+ ptep = pte_offset_kernel(pmdp, addr);
667672
668673 /*
669674 * If the page isn't marked valid, the page cannot possibly be
....@@ -697,10 +702,7 @@
697702 }
698703 if (exec) {
699704 if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
700
- int cpu = smp_processor_id();
701
-
702
- if (cpu_context(cpu, mm) != 0)
703
- drop_mmu_context(mm, cpu);
705
+ drop_mmu_context(mm);
704706 } else
705707 vaddr ? r4k_blast_icache_page(addr) :
706708 r4k_blast_icache_user_page(addr);
....@@ -770,7 +772,7 @@
770772 r4k_blast_icache();
771773 else {
772774 switch (boot_cpu_type()) {
773
- case CPU_LOONGSON2:
775
+ case CPU_LOONGSON2EF:
774776 protected_loongson2_blast_icache_range(start, end);
775777 break;
776778
....@@ -863,7 +865,7 @@
863865 preempt_disable();
864866 if (cpu_has_inclusive_pcaches) {
865867 if (size >= scache_size) {
866
- if (current_cpu_type() != CPU_LOONGSON3)
868
+ if (current_cpu_type() != CPU_LOONGSON64)
867869 r4k_blast_scache();
868870 else
869871 r4k_blast_scache_node(pa_to_nid(addr));
....@@ -895,6 +897,31 @@
895897 __sync();
896898 }
897899
900
/*
 * Write back the boundary secondary-cache lines of [addr, addr + size).
 *
 * Writes back at most four lines: the first, the last, the second, and
 * the second-to-last line of the range, returning early when the range
 * is too small for them to be distinct.  Called from r4k_dma_cache_inv()
 * on CPU_BMIPS5000 ahead of the invalidate — presumably so that dirty
 * data sharing the partially-covered boundary lines is not discarded by
 * the following invalidate; NOTE(review): confirm intent against the
 * Broadcom BMIPS5000 errata/documentation.
 *
 * @addr: start of the DMA range (any alignment)
 * @size: length of the range in bytes
 */
static void prefetch_cache_inv(unsigned long addr, unsigned long size)
{
	unsigned int linesz = cpu_scache_line_size();
	unsigned long addr0 = addr, addr1;

	/* addr0/addr1 = scache-line-aligned first and last lines touched */
	addr0 &= ~(linesz - 1);
	addr1 = (addr0 + size - 1) & ~(linesz - 1);

	/* First and last lines of the range. */
	protected_writeback_scache_line(addr0);
	if (likely(addr1 != addr0))
		protected_writeback_scache_line(addr1);
	else
		return;		/* whole range fits in one line */

	/* Second line, if distinct from the last. */
	addr0 += linesz;
	if (likely(addr1 != addr0))
		protected_writeback_scache_line(addr0);
	else
		return;		/* exactly two lines */

	/*
	 * Second-to-last line, if distinct from the lines already done.
	 * Bug fix: the original passed addr0 here, writing back the
	 * second line twice and never the second-to-last line; the
	 * freshly decremented addr1 is the intended argument.
	 */
	addr1 -= linesz;
	if (likely(addr1 > addr0))
		protected_writeback_scache_line(addr1);
}
924
+
898925 static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
899926 {
900927 /* Catch bad driver code */
....@@ -902,9 +929,13 @@
902929 return;
903930
904931 preempt_disable();
932
+
933
+ if (current_cpu_type() == CPU_BMIPS5000)
934
+ prefetch_cache_inv(addr, size);
935
+
905936 if (cpu_has_inclusive_pcaches) {
906937 if (size >= scache_size) {
907
- if (current_cpu_type() != CPU_LOONGSON3)
938
+ if (current_cpu_type() != CPU_LOONGSON64)
908939 r4k_blast_scache();
909940 else
910941 r4k_blast_scache_node(pa_to_nid(addr));
....@@ -936,119 +967,6 @@
936967 __sync();
937968 }
938969 #endif /* CONFIG_DMA_NONCOHERENT */
939
-
940
-struct flush_cache_sigtramp_args {
941
- struct mm_struct *mm;
942
- struct page *page;
943
- unsigned long addr;
944
-};
945
-
946
-/*
947
- * While we're protected against bad userland addresses we don't care
948
- * very much about what happens in that case. Usually a segmentation
949
- * fault will dump the process later on anyway ...
950
- */
951
-static void local_r4k_flush_cache_sigtramp(void *args)
952
-{
953
- struct flush_cache_sigtramp_args *fcs_args = args;
954
- unsigned long addr = fcs_args->addr;
955
- struct page *page = fcs_args->page;
956
- struct mm_struct *mm = fcs_args->mm;
957
- int map_coherent = 0;
958
- void *vaddr;
959
-
960
- unsigned long ic_lsize = cpu_icache_line_size();
961
- unsigned long dc_lsize = cpu_dcache_line_size();
962
- unsigned long sc_lsize = cpu_scache_line_size();
963
-
964
- /*
965
- * If owns no valid ASID yet, cannot possibly have gotten
966
- * this page into the cache.
967
- */
968
- if (!has_valid_asid(mm, R4K_HIT))
969
- return;
970
-
971
- if (mm == current->active_mm) {
972
- vaddr = NULL;
973
- } else {
974
- /*
975
- * Use kmap_coherent or kmap_atomic to do flushes for
976
- * another ASID than the current one.
977
- */
978
- map_coherent = (cpu_has_dc_aliases &&
979
- page_mapcount(page) &&
980
- !Page_dcache_dirty(page));
981
- if (map_coherent)
982
- vaddr = kmap_coherent(page, addr);
983
- else
984
- vaddr = kmap_atomic(page);
985
- addr = (unsigned long)vaddr + (addr & ~PAGE_MASK);
986
- }
987
-
988
- R4600_HIT_CACHEOP_WAR_IMPL;
989
- if (!cpu_has_ic_fills_f_dc) {
990
- if (dc_lsize)
991
- vaddr ? flush_dcache_line(addr & ~(dc_lsize - 1))
992
- : protected_writeback_dcache_line(
993
- addr & ~(dc_lsize - 1));
994
- if (!cpu_icache_snoops_remote_store && scache_size)
995
- vaddr ? flush_scache_line(addr & ~(sc_lsize - 1))
996
- : protected_writeback_scache_line(
997
- addr & ~(sc_lsize - 1));
998
- }
999
- if (ic_lsize)
1000
- vaddr ? flush_icache_line(addr & ~(ic_lsize - 1))
1001
- : protected_flush_icache_line(addr & ~(ic_lsize - 1));
1002
-
1003
- if (vaddr) {
1004
- if (map_coherent)
1005
- kunmap_coherent();
1006
- else
1007
- kunmap_atomic(vaddr);
1008
- }
1009
-
1010
- if (MIPS4K_ICACHE_REFILL_WAR) {
1011
- __asm__ __volatile__ (
1012
- ".set push\n\t"
1013
- ".set noat\n\t"
1014
- ".set "MIPS_ISA_LEVEL"\n\t"
1015
-#ifdef CONFIG_32BIT
1016
- "la $at,1f\n\t"
1017
-#endif
1018
-#ifdef CONFIG_64BIT
1019
- "dla $at,1f\n\t"
1020
-#endif
1021
- "cache %0,($at)\n\t"
1022
- "nop; nop; nop\n"
1023
- "1:\n\t"
1024
- ".set pop"
1025
- :
1026
- : "i" (Hit_Invalidate_I));
1027
- }
1028
- if (MIPS_CACHE_SYNC_WAR)
1029
- __asm__ __volatile__ ("sync");
1030
-}
1031
-
1032
-static void r4k_flush_cache_sigtramp(unsigned long addr)
1033
-{
1034
- struct flush_cache_sigtramp_args args;
1035
- int npages;
1036
-
1037
- down_read(&current->mm->mmap_sem);
1038
-
1039
- npages = get_user_pages_fast(addr, 1, 0, &args.page);
1040
- if (npages < 1)
1041
- goto out;
1042
-
1043
- args.mm = current->mm;
1044
- args.addr = addr;
1045
-
1046
- r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_sigtramp, &args);
1047
-
1048
- put_page(args.page);
1049
-out:
1050
- up_read(&current->mm->mmap_sem);
1051
-}
1052970
1053971 static void r4k_flush_icache_all(void)
1054972 {
....@@ -1127,7 +1045,7 @@
11271045 "cache\t%1, 0x3000(%0)\n\t"
11281046 ".set pop\n"
11291047 :
1130
- : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
1048
+ : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill_I));
11311049 }
11321050 }
11331051
....@@ -1151,12 +1069,12 @@
11511069 if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
11521070 present = 1;
11531071 if (rev == PRID_REV_ENCODE_332(2, 4, 0))
1154
- write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
1072
+ write_c0_config6(read_c0_config6() | MTI_CONF6_SYND);
11551073 break;
11561074 case PRID_IMP_1074K:
11571075 if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
11581076 present = 1;
1159
- write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
1077
+ write_c0_config6(read_c0_config6() | MTI_CONF6_SYND);
11601078 }
11611079 break;
11621080 default:
....@@ -1211,7 +1129,6 @@
12111129 c->options |= MIPS_CPU_CACHE_CDEX_P;
12121130 break;
12131131
1214
- case CPU_R5432:
12151132 case CPU_R5500:
12161133 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
12171134 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
....@@ -1247,7 +1164,6 @@
12471164 case CPU_R4400PC:
12481165 case CPU_R4400SC:
12491166 case CPU_R4400MC:
1250
- case CPU_R4300:
12511167 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
12521168 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
12531169 c->icache.ways = 1;
....@@ -1280,6 +1196,7 @@
12801196
12811197 case CPU_VR4133:
12821198 write_c0_config(config & ~VR41_CONF_P4K);
1199
+ fallthrough;
12831200 case CPU_VR4131:
12841201 /* Workaround for cache instruction bug of VR4131 */
12851202 if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
....@@ -1338,7 +1255,7 @@
13381255 c->options |= MIPS_CPU_PREFETCH;
13391256 break;
13401257
1341
- case CPU_LOONGSON2:
1258
+ case CPU_LOONGSON2EF:
13421259 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
13431260 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
13441261 if (prid & 0x3)
....@@ -1356,7 +1273,7 @@
13561273 c->dcache.waybit = 0;
13571274 break;
13581275
1359
- case CPU_LOONGSON3:
1276
+ case CPU_LOONGSON64:
13601277 config1 = read_c0_config1();
13611278 lsize = (config1 >> 19) & 7;
13621279 if (lsize)
....@@ -1381,7 +1298,9 @@
13811298 c->dcache.ways *
13821299 c->dcache.linesz;
13831300 c->dcache.waybit = 0;
1384
- if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2)
1301
+ if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
1302
+ (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
1303
+ (c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
13851304 c->options |= MIPS_CPU_PREFETCH;
13861305 break;
13871306
....@@ -1503,7 +1422,7 @@
15031422 case CPU_74K:
15041423 case CPU_1074K:
15051424 has_74k_erratum = alias_74k_erratum(c);
1506
- /* Fall through. */
1425
+ fallthrough;
15071426 case CPU_M14KC:
15081427 case CPU_M14KEC:
15091428 case CPU_24K:
....@@ -1527,6 +1446,7 @@
15271446 c->dcache.flags |= MIPS_CACHE_PINDEX;
15281447 break;
15291448 }
1449
+ fallthrough;
15301450 default:
15311451 if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
15321452 c->dcache.flags |= MIPS_CACHE_ALIASES;
....@@ -1565,7 +1485,7 @@
15651485 c->dcache.flags &= ~MIPS_CACHE_ALIASES;
15661486 break;
15671487
1568
- case CPU_LOONGSON2:
1488
+ case CPU_LOONGSON2EF:
15691489 /*
15701490 * LOONGSON2 has 4 way icache, but when using indexed cache op,
15711491 * one op will act on all 4 ways
....@@ -1573,17 +1493,17 @@
15731493 c->icache.ways = 1;
15741494 }
15751495
1576
- printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
1577
- icache_size >> 10,
1578
- c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
1579
- way_string[c->icache.ways], c->icache.linesz);
1496
+ pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
1497
+ icache_size >> 10,
1498
+ c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
1499
+ way_string[c->icache.ways], c->icache.linesz);
15801500
1581
- printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
1582
- dcache_size >> 10, way_string[c->dcache.ways],
1583
- (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
1584
- (c->dcache.flags & MIPS_CACHE_ALIASES) ?
1501
+ pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
1502
+ dcache_size >> 10, way_string[c->dcache.ways],
1503
+ (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
1504
+ (c->dcache.flags & MIPS_CACHE_ALIASES) ?
15851505 "cache aliases" : "no aliases",
1586
- c->dcache.linesz);
1506
+ c->dcache.linesz);
15871507 }
15881508
15891509 static void probe_vcache(void)
....@@ -1591,7 +1511,7 @@
15911511 struct cpuinfo_mips *c = &current_cpu_data;
15921512 unsigned int config2, lsize;
15931513
1594
- if (current_cpu_type() != CPU_LOONGSON3)
1514
+ if (current_cpu_type() != CPU_LOONGSON64)
15951515 return;
15961516
15971517 config2 = read_c0_config2();
....@@ -1689,7 +1609,7 @@
16891609 c->options |= MIPS_CPU_INCLUSIVE_CACHES;
16901610 }
16911611
1692
-static void __init loongson3_sc_init(void)
1612
+static void loongson3_sc_init(void)
16931613 {
16941614 struct cpuinfo_mips *c = &current_cpu_data;
16951615 unsigned int config2, lsize;
....@@ -1706,8 +1626,13 @@
17061626 scache_size = c->scache.sets *
17071627 c->scache.ways *
17081628 c->scache.linesz;
1709
- /* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
1710
- scache_size *= 4;
1629
+
1630
+ /* Loongson-3 has 4-Scache banks, while Loongson-2K have only 2 banks */
1631
+ if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
1632
+ scache_size *= 2;
1633
+ else
1634
+ scache_size *= 4;
1635
+
17111636 c->scache.waybit = 0;
17121637 c->scache.waysize = scache_size / c->scache.ways;
17131638 pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
....@@ -1766,11 +1691,11 @@
17661691 #endif
17671692 return;
17681693
1769
- case CPU_LOONGSON2:
1694
+ case CPU_LOONGSON2EF:
17701695 loongson2_sc_init();
17711696 return;
17721697
1773
- case CPU_LOONGSON3:
1698
+ case CPU_LOONGSON64:
17741699 loongson3_sc_init();
17751700 return;
17761701
....@@ -1780,9 +1705,10 @@
17801705 return;
17811706
17821707 default:
1783
- if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
1784
- MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
1785
- MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
1708
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
1709
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
1710
+ MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
1711
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
17861712 #ifdef CONFIG_MIPS_CPU_SCACHE
17871713 if (mips_sc_init ()) {
17881714 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
....@@ -1980,7 +1906,6 @@
19801906
19811907 __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
19821908
1983
- flush_cache_sigtramp = r4k_flush_cache_sigtramp;
19841909 flush_icache_all = r4k_flush_icache_all;
19851910 local_flush_data_cache_page = local_r4k_flush_data_cache_page;
19861911 flush_data_cache_page = r4k_flush_data_cache_page;
....@@ -1989,22 +1914,21 @@
19891914 __flush_icache_user_range = r4k_flush_icache_user_range;
19901915 __local_flush_icache_user_range = local_r4k_flush_icache_user_range;
19911916
1992
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
1993
-# if defined(CONFIG_DMA_PERDEV_COHERENT)
1994
- if (0) {
1995
-# else
1996
- if ((coherentio == IO_COHERENCE_ENABLED) ||
1997
- ((coherentio == IO_COHERENCE_DEFAULT) && hw_coherentio)) {
1998
-# endif
1917
+#ifdef CONFIG_DMA_NONCOHERENT
1918
+#ifdef CONFIG_DMA_MAYBE_COHERENT
1919
+ if (coherentio == IO_COHERENCE_ENABLED ||
1920
+ (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio)) {
19991921 _dma_cache_wback_inv = (void *)cache_noop;
20001922 _dma_cache_wback = (void *)cache_noop;
20011923 _dma_cache_inv = (void *)cache_noop;
2002
- } else {
1924
+ } else
1925
+#endif /* CONFIG_DMA_MAYBE_COHERENT */
1926
+ {
20031927 _dma_cache_wback_inv = r4k_dma_cache_wback_inv;
20041928 _dma_cache_wback = r4k_dma_cache_wback_inv;
20051929 _dma_cache_inv = r4k_dma_cache_inv;
20061930 }
2007
-#endif
1931
+#endif /* CONFIG_DMA_NONCOHERENT */
20081932
20091933 build_clear_page();
20101934 build_copy_page();
....@@ -2036,7 +1960,6 @@
20361960 /* I$ fills from D$ just by emptying the write buffers */
20371961 flush_cache_page = (void *)b5k_instruction_hazard;
20381962 flush_cache_range = (void *)b5k_instruction_hazard;
2039
- flush_cache_sigtramp = (void *)b5k_instruction_hazard;
20401963 local_flush_data_cache_page = (void *)b5k_instruction_hazard;
20411964 flush_data_cache_page = (void *)b5k_instruction_hazard;
20421965 flush_icache_range = (void *)b5k_instruction_hazard;
....@@ -2046,7 +1969,7 @@
20461969 /* Optimization: an L2 flush implicitly flushes the L1 */
20471970 current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
20481971 break;
2049
- case CPU_LOONGSON3:
1972
+ case CPU_LOONGSON64:
20501973 /* Loongson-3 maintains cache coherency by hardware */
20511974 __flush_cache_all = cache_noop;
20521975 __flush_cache_vmap = cache_noop;
....@@ -2055,7 +1978,6 @@
20551978 flush_cache_mm = (void *)cache_noop;
20561979 flush_cache_page = (void *)cache_noop;
20571980 flush_cache_range = (void *)cache_noop;
2058
- flush_cache_sigtramp = (void *)cache_noop;
20591981 flush_icache_all = (void *)cache_noop;
20601982 flush_data_cache_page = (void *)cache_noop;
20611983 local_flush_data_cache_page = (void *)cache_noop;