forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/arch/sh/mm/ioremap.c
@@ -18,12 +18,59 @@
 #include <linux/mm.h>
 #include <linux/pci.h>
 #include <linux/io.h>
+#include <asm/io_trapped.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu.h>
+#include "ioremap.h"
+
+/*
+ * On 32-bit SH, we traditionally have the whole physical address space mapped
+ * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to do
+ * anything but place the address in the proper segment. This is true for P1
+ * and P2 addresses, as well as some P3 ones. However, most of the P3 addresses
+ * and newer cores using extended addressing need to map through page tables, so
+ * the ioremap() implementation becomes a bit more complicated.
+ */
+#ifdef CONFIG_29BIT
+static void __iomem *
+__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
+{
+	phys_addr_t last_addr = offset + size - 1;
+
+	/*
+	 * For P1 and P2 space this is trivial, as everything is already
+	 * mapped. Uncached access for P1 addresses are done through P2.
+	 * In the P3 case or for addresses outside of the 29-bit space,
+	 * mapping must be done by the PMB or by using page tables.
+	 */
+	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
+		u64 flags = pgprot_val(prot);
+
+		/*
+		 * Anything using the legacy PTEA space attributes needs
+		 * to be kicked down to page table mappings.
+		 */
+		if (unlikely(flags & _PAGE_PCC_MASK))
+			return NULL;
+		if (unlikely(flags & _PAGE_CACHABLE))
+			return (void __iomem *)P1SEGADDR(offset);
+
+		return (void __iomem *)P2SEGADDR(offset);
+	}
+
+	/* P4 above the store queues are always mapped. */
+	if (unlikely(offset >= P3_ADDR_MAX))
+		return (void __iomem *)P4SEGADDR(offset);
+
+	return NULL;
+}
+#else
+#define __ioremap_29bit(offset, size, prot)	NULL
+#endif /* CONFIG_29BIT */
 
 /*
  * Remap an arbitrary physical address space into the kernel virtual
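The 29-bit fast path added above is pure address arithmetic: the top three bits of a virtual address select the segment, and P1SEGADDR()/P2SEGADDR() drop those bits and OR in the desired segment base. Below is a minimal userspace sketch of that arithmetic; the segment constants mirror the classic SH layout from asm/addrspace.h, but the demo address and the program itself are illustrative only, not part of this patch.

/*
 * Userspace sketch of the 29-bit segment arithmetic. Constants mirror
 * the classic SH layout (asm/addrspace.h); the demo is illustrative.
 */
#include <stdio.h>

#define P1SEG	0x80000000UL	/* cached, unmapped segment */
#define P2SEG	0xa0000000UL	/* uncached, unmapped segment */

/* Keep the low 29 bits (the physical address), place them in a segment. */
static unsigned long p1segaddr(unsigned long phys)
{
	return (phys & 0x1fffffff) | P1SEG;
}

static unsigned long p2segaddr(unsigned long phys)
{
	return (phys & 0x1fffffff) | P2SEG;
}

int main(void)
{
	unsigned long phys = 0x1f000000UL;	/* hypothetical register block */

	printf("cached:   %#lx\n", p1segaddr(phys));	/* 0x9f000000 */
	printf("uncached: %#lx\n", p2segaddr(phys));	/* 0xbf000000 */
	return 0;
}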
@@ -41,6 +88,14 @@
 	struct vm_struct *area;
 	unsigned long offset, last_addr, addr, orig_addr;
 	void __iomem *mapped;
+
+	mapped = __ioremap_trapped(phys_addr, size);
+	if (mapped)
+		return mapped;
+
+	mapped = __ioremap_29bit(phys_addr, size, pgprot);
+	if (mapped)
+		return mapped;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
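Both new early returns cost nothing on configurations that cannot use them: when trapped-I/O emulation is disabled, asm/io_trapped.h collapses __ioremap_trapped() to a constant NULL, just as the #else branch above does for __ioremap_29bit() on parts without the 29-bit layout. A paraphrased sketch of that header pattern follows; the exact guard macros in this tree are an assumption, not verified against it.

/*
 * Paraphrased from asm/io_trapped.h; the CONFIG guard names are an
 * assumption about this tree.
 */
#if defined(CONFIG_IO_TRAPPED) && defined(CONFIG_MMU)
static inline void __iomem *
__ioremap_trapped(unsigned long offset, unsigned long size)
{
	/* Hand the range to a registered trap handler, if one claims it. */
	return match_trapped_io_handler(&trapped_mem, offset, size);
}
#else
/* Compiles away: the `if (mapped)` test above becomes dead code. */
#define __ioremap_trapped(offset, size) NULL
#endif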
@@ -103,7 +158,7 @@
 	return 0;
 }
 
-void __iounmap(void __iomem *addr)
+void iounmap(void __iomem *addr)
 {
 	unsigned long vaddr = (unsigned long __force)addr;
 	struct vm_struct *p;
@@ -134,4 +189,4 @@
 
 	kfree(p);
 }
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(iounmap);
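Net effect for drivers: iounmap() is now the exported symbol on SH rather than __iounmap, so modules link against the same name other architectures export, and ioremap() transparently picks trapped I/O, a bare segment address, or a page-table mapping. A hedged usage sketch follows; the device base and register offset are hypothetical, and only ioremap()/readl()/iounmap() are the real interfaces touched by this patch.

/*
 * Hedged driver-side sketch. DEMO_PHYS_BASE and DEMO_REG_ID are
 * hypothetical; on a 29-bit part ioremap() here may return a plain
 * P2-segment pointer with no vmalloc area behind it.
 */
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/types.h>

#define DEMO_PHYS_BASE	0xfe200000	/* hypothetical on-chip block */
#define DEMO_REG_ID	0x00		/* hypothetical ID register */

static int demo_probe(void)
{
	void __iomem *regs = ioremap(DEMO_PHYS_BASE, 0x100);
	u32 id;

	if (!regs)
		return -ENOMEM;

	id = readl(regs + DEMO_REG_ID);	/* MMIO read through the mapping */
	pr_info("demo: id=%08x\n", id);

	iounmap(regs);	/* resolves to the symbol exported above */
	return 0;
}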