| .. | .. |
| 17 | 17 | #include <asm/cache.h> |
| 18 | 18 | #include <asm/addrspace.h> |
| 19 | 19 | #include <asm/machvec.h> |
| 20 | | -#include <asm/pgtable.h> |
| | 20 | +#include <asm/page.h> |
| | 21 | +#include <linux/pgtable.h> |
| 21 | 22 | #include <asm-generic/iomap.h> |
| 22 | 23 | |
| 23 | | -#ifdef __KERNEL__ |
| 24 | 24 | #define __IO_PREFIX generic |
| 25 | 25 | #include <asm/io_generic.h> |
| 26 | | -#include <asm/io_trapped.h> |
| | 26 | +#include <asm-generic/pci_iomap.h> |
| 27 | 27 | #include <mach/mangle-port.h> |
| 28 | 28 | |
| 29 | 29 | #define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v)) |
| .. | .. |
| 114 | 114 | __BUILD_MEMORY_STRING(__raw_, b, u8) |
| 115 | 115 | __BUILD_MEMORY_STRING(__raw_, w, u16) |
| 116 | 116 | |
| 117 | | -#ifdef CONFIG_SUPERH32 |
| 118 | 117 | void __raw_writesl(void __iomem *addr, const void *data, int longlen); |
| 119 | 118 | void __raw_readsl(const void __iomem *addr, void *data, int longlen); |
| 120 | | -#else |
| 121 | | -__BUILD_MEMORY_STRING(__raw_, l, u32) |
| 122 | | -#endif |
| 123 | 119 | |
| 124 | 120 | __BUILD_MEMORY_STRING(__raw_, q, u64) |
| 125 | 121 | |
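With the CONFIG_SUPERH32 conditional gone, the assembly-backed `__raw_writesl()`/`__raw_readsl()` declarations above apply unconditionally. As a minimal sketch of how these string-I/O helpers are typically used, here is a hypothetical FIFO drain; `DEMO_FIFO_REG`, `DEMO_FIFO_WORDS`, and `demo_drain_fifo` are invented names, not part of this patch:

```c
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_FIFO_REG	0x10	/* hypothetical FIFO data register offset */
#define DEMO_FIFO_WORDS	64	/* hypothetical burst length, in u32 words */

/* Read DEMO_FIFO_WORDS consecutive u32 values from one MMIO address.
 * Unlike memcpy_fromio(), __raw_readsl() re-reads the same register,
 * which is the access pattern a hardware FIFO expects. */
static void demo_drain_fifo(void __iomem *base, u32 *buf)
{
	__raw_readsl(base + DEMO_FIFO_REG, buf, DEMO_FIFO_WORDS);
}
```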
| .. | .. |
| 228 | 224 | |
| 229 | 225 | #define IO_SPACE_LIMIT 0xffffffff |
| 230 | 226 | |
| 231 | | -/* synco on SH-4A, otherwise a nop */ |
| 232 | | -#define mmiowb() wmb() |
| 233 | | - |
| 234 | 227 | /* We really want to try and get these to memcpy etc */ |
| 235 | 228 | void memcpy_fromio(void *, const volatile void __iomem *, unsigned long); |
| 236 | 229 | void memcpy_toio(volatile void __iomem *, const void *, unsigned long); |
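The mmiowb() definition disappears here; as background from the kernel-wide mmiowb() removal (not stated in this hunk), its ordering guarantee was folded into the spin-unlock path, so drivers no longer issue the barrier themselves. A sketch of the driver-visible change, with `demo_lock`, `demo_write`, and `DEMO_DATA` as hypothetical names:

```c
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_DATA	0x04	/* hypothetical data register offset */

static DEFINE_SPINLOCK(demo_lock);

static void demo_write(void __iomem *regs, u32 val)
{
	spin_lock(&demo_lock);
	writel(val, regs + DEMO_DATA);
	/* Drivers used to call mmiowb() here so the MMIO write could not
	 * leak past the unlock; that ordering is now provided by
	 * spin_unlock() itself, so the explicit barrier is gone. */
	spin_unlock(&demo_lock);
}
```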
| .. | .. |
| 249 | 242 | #define phys_to_virt(address) (__va(address)) |
| 250 | 243 | #endif |
| 251 | 244 | |
| 252 | | -/* |
| 253 | | - * On 32-bit SH, we traditionally have the whole physical address space |
| 254 | | - * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do |
| 255 | | - * not need to do anything but place the address in the proper segment. |
| 256 | | - * This is true for P1 and P2 addresses, as well as some P3 ones. |
| 257 | | - * However, most of the P3 addresses and newer cores using extended |
| 258 | | - * addressing need to map through page tables, so the ioremap() |
| 259 | | - * implementation becomes a bit more complicated. |
| 260 | | - * |
| 261 | | - * See arch/sh/mm/ioremap.c for additional notes on this. |
| 262 | | - * |
| 263 | | - * We cheat a bit and always return uncachable areas until we've fixed |
| 264 | | - * the drivers to handle caching properly. |
| 265 | | - * |
| 266 | | - * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply |
| 267 | | - * doesn't exist, so everything must go through page tables. |
| 268 | | - */ |
| 269 | 245 | #ifdef CONFIG_MMU |
| | 246 | +void iounmap(void __iomem *addr); |
| 270 | 247 | void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size, |
| 271 | 248 | pgprot_t prot, void *caller); |
| 272 | | -void __iounmap(void __iomem *addr); |
| 273 | | - |
| 274 | | -static inline void __iomem * |
| 275 | | -__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot) |
| 276 | | -{ |
| 277 | | - return __ioremap_caller(offset, size, prot, __builtin_return_address(0)); |
| 278 | | -} |
| 279 | | - |
| 280 | | -static inline void __iomem * |
| 281 | | -__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot) |
| 282 | | -{ |
| 283 | | -#ifdef CONFIG_29BIT |
| 284 | | - phys_addr_t last_addr = offset + size - 1; |
| 285 | | - |
| 286 | | - /* |
| 287 | | - * For P1 and P2 space this is trivial, as everything is already |
| 288 | | - * mapped. Uncached access for P1 addresses are done through P2. |
| 289 | | - * In the P3 case or for addresses outside of the 29-bit space, |
| 290 | | - * mapping must be done by the PMB or by using page tables. |
| 291 | | - */ |
| 292 | | - if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) { |
| 293 | | - u64 flags = pgprot_val(prot); |
| 294 | | - |
| 295 | | - /* |
| 296 | | - * Anything using the legacy PTEA space attributes needs |
| 297 | | - * to be kicked down to page table mappings. |
| 298 | | - */ |
| 299 | | - if (unlikely(flags & _PAGE_PCC_MASK)) |
| 300 | | - return NULL; |
| 301 | | - if (unlikely(flags & _PAGE_CACHABLE)) |
| 302 | | - return (void __iomem *)P1SEGADDR(offset); |
| 303 | | - |
| 304 | | - return (void __iomem *)P2SEGADDR(offset); |
| 305 | | - } |
| 306 | | - |
| 307 | | - /* P4 above the store queues are always mapped. */ |
| 308 | | - if (unlikely(offset >= P3_ADDR_MAX)) |
| 309 | | - return (void __iomem *)P4SEGADDR(offset); |
| 310 | | -#endif |
| 311 | | - |
| 312 | | - return NULL; |
| 313 | | -} |
| 314 | | - |
| 315 | | -static inline void __iomem * |
| 316 | | -__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot) |
| 317 | | -{ |
| 318 | | - void __iomem *ret; |
| 319 | | - |
| 320 | | - ret = __ioremap_trapped(offset, size); |
| 321 | | - if (ret) |
| 322 | | - return ret; |
| 323 | | - |
| 324 | | - ret = __ioremap_29bit(offset, size, prot); |
| 325 | | - if (ret) |
| 326 | | - return ret; |
| 327 | | - |
| 328 | | - return __ioremap(offset, size, prot); |
| 329 | | -} |
| 330 | | -#else |
| 331 | | -#define __ioremap(offset, size, prot) ((void __iomem *)(offset)) |
| 332 | | -#define __ioremap_mode(offset, size, prot) ((void __iomem *)(offset)) |
| 333 | | -#define __iounmap(addr) do { } while (0) |
| 334 | | -#endif /* CONFIG_MMU */ |
| 335 | 249 | |
| 336 | 250 | static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size) |
| 337 | 251 | { |
| 338 | | - return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE); |
| | 252 | + return __ioremap_caller(offset, size, PAGE_KERNEL_NOCACHE, |
| | 253 | + __builtin_return_address(0)); |
| 339 | 254 | } |
| 340 | 255 | |
| 341 | 256 | static inline void __iomem * |
| 342 | 257 | ioremap_cache(phys_addr_t offset, unsigned long size) |
| 343 | 258 | { |
| 344 | | - return __ioremap_mode(offset, size, PAGE_KERNEL); |
| | 259 | + return __ioremap_caller(offset, size, PAGE_KERNEL, |
| | 260 | + __builtin_return_address(0)); |
| 345 | 261 | } |
| 346 | 262 | #define ioremap_cache ioremap_cache |
| 347 | 263 | |
| 348 | 264 | #ifdef CONFIG_HAVE_IOREMAP_PROT |
| 349 | | -static inline void __iomem * |
| 350 | | -ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags) |
| | 265 | +static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, |
| | 266 | + unsigned long flags) |
| 351 | 267 | { |
| 352 | | - return __ioremap_mode(offset, size, __pgprot(flags)); |
| | 268 | + return __ioremap_caller(offset, size, __pgprot(flags), |
| | 269 | + __builtin_return_address(0)); |
| 353 | 270 | } |
| 354 | | -#endif |
| | 271 | +#endif /* CONFIG_HAVE_IOREMAP_PROT */ |
| 355 | 272 | |
| 356 | | -#ifdef CONFIG_IOREMAP_FIXED |
| 357 | | -extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t); |
| 358 | | -extern int iounmap_fixed(void __iomem *); |
| 359 | | -extern void ioremap_fixed_init(void); |
| 360 | | -#else |
| 361 | | -static inline void __iomem * |
| 362 | | -ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot) |
| | 273 | +#else /* CONFIG_MMU */ |
| | 274 | +static inline void __iomem *ioremap(phys_addr_t offset, size_t size) |
| 363 | 275 | { |
| 364 | | - BUG(); |
| 365 | | - return NULL; |
| | 276 | + return (void __iomem *)(unsigned long)offset; |
| 366 | 277 | } |
| 367 | 278 | |
| 368 | | -static inline void ioremap_fixed_init(void) { } |
| 369 | | -static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; } |
| 370 | | -#endif |
| | 279 | +static inline void iounmap(volatile void __iomem *addr) { } |
| | 280 | +#endif /* CONFIG_MMU */ |
| 371 | 281 | |
| 372 | | -#define ioremap_nocache ioremap |
| 373 | 282 | #define ioremap_uc ioremap |
| 374 | | - |
| 375 | | -static inline void iounmap(void __iomem *addr) |
| 376 | | -{ |
| 377 | | - __iounmap(addr); |
| 378 | | -} |
| 379 | 283 | |
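After this refactor the `__ioremap_mode()`/`__ioremap_29bit()` fast paths are gone: every mapping request under CONFIG_MMU funnels into `__ioremap_caller()`, and `iounmap()` is a real function rather than a wrapper around `__iounmap()`. Callers are unaffected, as this minimal sketch shows; `DEMO_PHYS_BASE`, `DEMO_REG_SIZE`, `DEMO_CTRL`, and `demo_probe` are invented for illustration:

```c
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_PHYS_BASE	0xfe000000UL	/* hypothetical device base address */
#define DEMO_REG_SIZE	0x100		/* hypothetical register window size */
#define DEMO_CTRL	0x00		/* hypothetical control register */

static int demo_probe(void)
{
	void __iomem *regs;

	/* ioremap() now expands straight to __ioremap_caller() with
	 * __builtin_return_address(0) as the caller for tracking. */
	regs = ioremap(DEMO_PHYS_BASE, DEMO_REG_SIZE);
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + DEMO_CTRL);

	iounmap(regs);	/* plain function under CONFIG_MMU, no-op stub otherwise */
	return 0;
}
```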
| 380 | 284 | /* |
| 381 | 285 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem |
| .. | .. |
| 391 | 295 | #define ARCH_HAS_VALID_PHYS_ADDR_RANGE |
| 392 | 296 | int valid_phys_addr_range(phys_addr_t addr, size_t size); |
| 393 | 297 | int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); |
| 394 | | - |
| 395 | | -#endif /* __KERNEL__ */ |
| 396 | 298 | |
| 397 | 299 | #endif /* __ASM_SH_IO_H */ |