@@ -18,12 +18,59 @@
 #include <linux/mm.h>
 #include <linux/pci.h>
 #include <linux/io.h>
+#include <asm/io_trapped.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu.h>
+#include "ioremap.h"
+
+/*
+ * On 32-bit SH, we traditionally have the whole physical address space mapped
+ * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to do
+ * anything but place the address in the proper segment. This is true for P1
+ * and P2 addresses, as well as some P3 ones. However, most of the P3 addresses
+ * and newer cores using extended addressing need to map through page tables, so
+ * the ioremap() implementation becomes a bit more complicated.
+ */
+#ifdef CONFIG_29BIT
+static void __iomem *
+__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
+{
+	phys_addr_t last_addr = offset + size - 1;
+
+	/*
+	 * For P1 and P2 space this is trivial, as everything is already
+	 * mapped. Uncached access for P1 addresses are done through P2.
+	 * In the P3 case or for addresses outside of the 29-bit space,
+	 * mapping must be done by the PMB or by using page tables.
+	 */
+	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
+		u64 flags = pgprot_val(prot);
+
+		/*
+		 * Anything using the legacy PTEA space attributes needs
+		 * to be kicked down to page table mappings.
+		 */
+		if (unlikely(flags & _PAGE_PCC_MASK))
+			return NULL;
+		if (unlikely(flags & _PAGE_CACHABLE))
+			return (void __iomem *)P1SEGADDR(offset);
+
+		return (void __iomem *)P2SEGADDR(offset);
+	}
+
+	/* P4 above the store queues are always mapped. */
+	if (unlikely(offset >= P3_ADDR_MAX))
+		return (void __iomem *)P4SEGADDR(offset);
+
+	return NULL;
+}
+#else
+#define __ioremap_29bit(offset, size, prot)	NULL
+#endif /* CONFIG_29BIT */
 
 /*
  * Remap an arbitrary physical address space into the kernel virtual
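The segment translation this 29-bit fast path relies on is easiest to see in isolation. The sketch below is a stand-alone, user-space approximation: it assumes the conventional SH segment bases (P1 at 0x80000000 for cached access, P2 at 0xa0000000 for uncached) and mimics what the P1SEGADDR()/P2SEGADDR() macros do, i.e. mask off the upper segment bits and rebase the 29-bit physical address into the chosen segment. The device address is made up for illustration.

```c
#include <stdio.h>

/* Assumed SH segment bases (cf. asm/addrspace.h); values here illustrate
 * the 29-bit layout and are not taken from this patch. */
#define P1SEG	0x80000000UL	/* identity-mapped, cached   */
#define P2SEG	0xa0000000UL	/* identity-mapped, uncached */

/* Rebase a 29-bit physical address into a fixed segment, roughly what
 * P1SEGADDR()/P2SEGADDR() expand to. */
static unsigned long to_p1(unsigned long phys)
{
	return (phys & 0x1fffffffUL) | P1SEG;
}

static unsigned long to_p2(unsigned long phys)
{
	return (phys & 0x1fffffffUL) | P2SEG;
}

int main(void)
{
	unsigned long phys = 0x0c000000UL;	/* hypothetical device address */

	/* A cacheable request resolves to P1, everything else to P2. */
	printf("P1 (cached):   %#lx\n", to_p1(phys));	/* 0x8c000000 */
	printf("P2 (uncached): %#lx\n", to_p2(phys));	/* 0xac000000 */
	return 0;
}
```

No mapping bookkeeping is needed on this path, which is why __ioremap_29bit() can hand back a segment address directly without allocating anything.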
@@ -41,6 +88,14 @@
 	struct vm_struct *area;
 	unsigned long offset, last_addr, addr, orig_addr;
 	void __iomem *mapped;
+
+	mapped = __ioremap_trapped(phys_addr, size);
+	if (mapped)
+		return mapped;
+
+	mapped = __ioremap_29bit(phys_addr, size, pgprot);
+	if (mapped)
+		return mapped;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
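For readers without the rest of __ioremap_caller() in front of them, the two new early returns give the function a three-stage shape: trapped I/O windows first, then the 29-bit segment translation, then the pre-existing page-table path. A condensed, illustrative outline of that ordering (not the literal kernel code; the final stage is elided here just as it is in this hunk):

```c
/* Illustrative outline only: the real __ioremap_caller() continues with the
 * vmalloc-area/page-table mapping that this hunk does not modify. */
static void __iomem *outline(phys_addr_t phys_addr, unsigned long size,
			     pgprot_t pgprot)
{
	void __iomem *mapped;

	/* 1. An emulated (trapped) I/O window, if one covers the range. */
	mapped = __ioremap_trapped(phys_addr, size);
	if (mapped)
		return mapped;

	/* 2. The fixed P1/P2/P4 segments on 29-bit parts. */
	mapped = __ioremap_29bit(phys_addr, size, pgprot);
	if (mapped)
		return mapped;

	/* 3. Otherwise fall through to the page-table mapping (elided). */
	return NULL;
}
```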
@@ -103,7 +158,7 @@
 	return 0;
 }
 
-void __iounmap(void __iomem *addr)
+void iounmap(void __iomem *addr)
 {
 	unsigned long vaddr = (unsigned long __force)addr;
 	struct vm_struct *p;
@@ -134,4 +189,4 @@
 
 	kfree(p);
 }
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(iounmap);
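With this change iounmap() is itself the defined and exported function, rather than __iounmap(). A hypothetical driver-style sketch of the usual ioremap()/iounmap() pairing that the export serves; the base address, window size, and register offset below are made up for illustration:

```c
#include <linux/io.h>
#include <linux/errno.h>

static void __iomem *regs;

static int example_probe(void)
{
	regs = ioremap(0xfe200000, 0x100);	/* hypothetical MMIO window */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x04);		/* hypothetical enable register */
	return 0;
}

static void example_remove(void)
{
	iounmap(regs);		/* the symbol exported by this patch */
	regs = NULL;
}
```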