2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c

--- a/kernel/arch/mips/mm/ioremap.c
+++ b/kernel/arch/mips/mm/ioremap.c
@@ -14,90 +14,13 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm_types.h>
+#include <linux/io.h>
 #include <asm/cacheflush.h>
-#include <asm/io.h>
 #include <asm/tlbflush.h>
+#include <ioremap.h>
 
-static inline void remap_area_pte(pte_t * pte, unsigned long address,
-	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
-{
-	phys_addr_t end;
-	unsigned long pfn;
-	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
-				   | __WRITEABLE | flags);
-
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-	BUG_ON(address >= end);
-	pfn = phys_addr >> PAGE_SHIFT;
-	do {
-		if (!pte_none(*pte)) {
-			printk("remap_area_pte: page already exists\n");
-			BUG();
-		}
-		set_pte(pte, pfn_pte(pfn, pgprot));
-		address += PAGE_SIZE;
-		pfn++;
-		pte++;
-	} while (address && (address < end));
-}
-
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
-	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
-{
-	phys_addr_t end;
-
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
-	phys_addr -= address;
-	BUG_ON(address >= end);
-	do {
-		pte_t * pte = pte_alloc_kernel(pmd, address);
-		if (!pte)
-			return -ENOMEM;
-		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address && (address < end));
-	return 0;
-}
-
-static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
-	phys_addr_t size, unsigned long flags)
-{
-	int error;
-	pgd_t * dir;
-	unsigned long end = address + size;
-
-	phys_addr -= address;
-	dir = pgd_offset(&init_mm, address);
-	flush_cache_all();
-	BUG_ON(address >= end);
-	do {
-		pud_t *pud;
-		pmd_t *pmd;
-
-		error = -ENOMEM;
-		pud = pud_alloc(&init_mm, dir, address);
-		if (!pud)
-			break;
-		pmd = pmd_alloc(&init_mm, pud, address);
-		if (!pmd)
-			break;
-		if (remap_area_pmd(pmd, address, end - address,
-				   phys_addr + address, flags))
-			break;
-		error = 0;
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (address && (address < end));
-	flush_tlb_all();
-	return error;
-}
+#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
+#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
 
 static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
 			       void *arg)
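
Note on this hunk: it deletes the hand-rolled page-table walker (remap_area_pte/remap_area_pmd/remap_area_pages), whose job the generic ioremap_page_range() takes over later in this patch, and hoists the IS_LOW512/IS_KSEG1 helpers to the top of the file. Background for the two macros: on MIPS32, any physical address below 512 MB is reachable through the fixed, uncached KSEG1 segment at CKSEG1 (0xa0000000) with no page-table mapping at all, and the surrounding code (not shown in these hunks) uses that as a fast path. A minimal user-space sketch of what the macros test; the CKSEG1 value is the standard MIPS32 one, and the main() harness is illustration only, not kernel code:

/* Sketch: the KSEG1 shortcut that IS_LOW512()/IS_KSEG1() encode. */
#include <stdio.h>

#define CKSEG1 0xa0000000UL	/* unmapped, uncached segment */
#define IS_LOW512(addr) (!((unsigned long long)(addr) & ~0x1fffffffULL))
#define IS_KSEG1(addr)  (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

int main(void)
{
	unsigned long long phys = 0x1fc00000ULL;	/* e.g. a boot-ROM address below 512 MB */

	if (IS_LOW512(phys)) {
		/* No page tables needed: use the fixed uncached alias. */
		unsigned long vaddr = CKSEG1 | (unsigned long)phys;
		printf("phys %#llx -> KSEG1 alias %#lx, IS_KSEG1=%d\n",
		       phys, vaddr, IS_KSEG1(vaddr));
	}
	return 0;
}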
@@ -114,27 +37,25 @@
 }
 
 /*
- * Generic mapping function (not visible outside):
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
+ * ioremap_prot - map bus memory into CPU space
+ * @phys_addr: bus address of the memory
+ * @size: size of the resource to map
  *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
+ * ioremap_prot gives the caller control over cache coherency attributes (CCA)
  */
-
-#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
-
-void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
+void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
+		unsigned long prot_val)
 {
+	unsigned long flags = prot_val & _CACHE_MASK;
 	unsigned long offset, pfn, last_pfn;
-	struct vm_struct * area;
+	struct vm_struct *area;
 	phys_addr_t last_addr;
-	void * addr;
+	unsigned long vaddr;
+	void __iomem *cpu_addr;
+
+	cpu_addr = plat_ioremap(phys_addr, size, flags);
+	if (cpu_addr)
+		return cpu_addr;
 
 	phys_addr = fixup_bigphys_addr(phys_addr, size);
 
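Note on this hunk: the old __ioremap(phys_addr, size, flags) entry point becomes ioremap_prot(phys_addr, size, prot_val), matching the generic prototype. The cache coherency attribute is masked out of prot_val with _CACHE_MASK, and the platform hook plat_ioremap() gets first refusal so boards with fixed ioremap windows can short-circuit the common path. A caller wanting an explicit CCA would look roughly like the sketch below; the device name, addresses, and register offset are hypothetical, and _CACHE_UNCACHED is the CCA that plain ioremap() passes on MIPS:

/* Sketch: mapping hypothetical device registers uncached via ioremap_prot(). */
#include <linux/io.h>
#include <linux/errno.h>

#define MYDEV_BASE	0x1f000000	/* hypothetical bus address */
#define MYDEV_SIZE	0x1000
#define MYDEV_CTRL	0x0		/* hypothetical register offset */

static void __iomem *mydev_regs;

static int mydev_map_regs(void)
{
	mydev_regs = ioremap_prot(MYDEV_BASE, MYDEV_SIZE, _CACHE_UNCACHED);
	if (!mydev_regs)
		return -ENOMEM;

	writel(1, mydev_regs + MYDEV_CTRL);	/* poke the control register */
	return 0;
}
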
@@ -177,30 +98,22 @@
 	area = get_vm_area(size, VM_IOREMAP);
 	if (!area)
 		return NULL;
-	addr = area->addr;
-	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
-		vunmap(addr);
+	vaddr = (unsigned long)area->addr;
+
+	flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
+	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
+			__pgprot(flags))) {
+		free_vm_area(area);
 		return NULL;
 	}
 
-	return (void __iomem *) (offset + (char *)addr);
+	return (void __iomem *)(vaddr + offset);
 }
+EXPORT_SYMBOL(ioremap_prot);
 
-
-#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
-
-void __iounmap(const volatile void __iomem *addr)
+void iounmap(const volatile void __iomem *addr)
 {
-	struct vm_struct *p;
-
-	if (IS_KSEG1(addr))
-		return;
-
-	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
-	if (!p)
-		printk(KERN_ERR "iounmap: bad address %p\n", addr);
-
-	kfree(p);
+	if (!plat_iounmap(addr) && !IS_KSEG1(addr))
+		vunmap((void *)((unsigned long)addr & PAGE_MASK));
 }
-
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(iounmap);
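
Note on the unmap side: __iounmap() becomes plain iounmap() and sheds the manual bookkeeping. plat_iounmap() gets a chance to release platform mappings first, KSEG1 aliases (which never had a vmalloc-area mapping) are simply skipped, and everything else is handed to vunmap() on the page-aligned address, so the remove_vm_area()/kfree() dance and the "bad address" printk go away. End to end, the converted API pairs up as in this sketch, with the address and size hypothetical as above:

/* Sketch: full map/use/unmap lifecycle with the converted API. */
void __iomem *regs = ioremap(0x1f000000, 0x1000);	/* hypothetical address */
if (regs) {
	u32 id = readl(regs);	/* read a device register */
	(void)id;
	iounmap(regs);		/* handles KSEG1 aliases, platform
				   mappings, and page-table mappings */
}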