.. | .. |
---|
| 1 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ |
---|
1 | 2 | /* Generic I/O port emulation. |
---|
2 | 3 | * |
---|
3 | 4 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. |
---|
4 | 5 | * Written by David Howells (dhowells@redhat.com) |
---|
5 | | - * |
---|
6 | | - * This program is free software; you can redistribute it and/or |
---|
7 | | - * modify it under the terms of the GNU General Public Licence |
---|
8 | | - * as published by the Free Software Foundation; either version |
---|
9 | | - * 2 of the Licence, or (at your option) any later version. |
---|
10 | 6 | */ |
---|
11 | 7 | #ifndef __ASM_GENERIC_IO_H |
---|
12 | 8 | #define __ASM_GENERIC_IO_H |
---|
.. | .. |
---|
19 | 15 | #include <asm-generic/iomap.h> |
---|
20 | 16 | #endif |
---|
21 | 17 | |
---|
| 18 | +#include <asm/mmiowb.h> |
---|
22 | 19 | #include <asm-generic/pci_iomap.h> |
---|
23 | | - |
---|
24 | | -#ifndef mmiowb |
---|
25 | | -#define mmiowb() do {} while (0) |
---|
26 | | -#endif |
---|
27 | 20 | |
---|
28 | 21 | #ifndef __io_br |
---|
29 | 22 | #define __io_br() barrier() |
---|
.. | .. |
---|
32 | 25 | /* prevent prefetching of coherent DMA data ahead of a dma-complete */ |
---|
33 | 26 | #ifndef __io_ar |
---|
34 | 27 | #ifdef rmb |
---|
35 | | -#define __io_ar() rmb() |
---|
| 28 | +#define __io_ar(v) rmb() |
---|
36 | 29 | #else |
---|
37 | | -#define __io_ar() barrier() |
---|
| 30 | +#define __io_ar(v) barrier() |
---|
38 | 31 | #endif |
---|
39 | 32 | #endif |
---|
40 | 33 | |
---|
.. | .. |
---|
49 | 42 | |
---|
50 | 43 | /* serialize device access against a spin_unlock, usually handled there. */ |
---|
51 | 44 | #ifndef __io_aw |
---|
52 | | -#define __io_aw() barrier() |
---|
| 45 | +#define __io_aw() mmiowb_set_pending() |
---|
53 | 46 | #endif |
---|
54 | 47 | |
---|
55 | 48 | #ifndef __io_pbw |
---|
.. | .. |
---|
65 | 58 | #endif |
---|
66 | 59 | |
---|
67 | 60 | #ifndef __io_par |
---|
68 | | -#define __io_par() __io_ar() |
---|
| 61 | +#define __io_par(v) __io_ar(v) |
---|
69 | 62 | #endif |
---|
70 | 63 | |
---|
71 | 64 | |
---|
.. | .. |
---|
158 | 151 | |
---|
159 | 152 | __io_br(); |
---|
160 | 153 | val = __raw_readb(addr); |
---|
161 | | - __io_ar(); |
---|
| 154 | + __io_ar(val); |
---|
162 | 155 | return val; |
---|
163 | 156 | } |
---|
164 | 157 | #endif |
---|
.. | .. |
---|
170 | 163 | u16 val; |
---|
171 | 164 | |
---|
172 | 165 | __io_br(); |
---|
173 | | - val = __le16_to_cpu(__raw_readw(addr)); |
---|
174 | | - __io_ar(); |
---|
| 166 | + val = __le16_to_cpu((__le16 __force)__raw_readw(addr)); |
---|
| 167 | + __io_ar(val); |
---|
175 | 168 | return val; |
---|
176 | 169 | } |
---|
177 | 170 | #endif |
---|
.. | .. |
---|
183 | 176 | u32 val; |
---|
184 | 177 | |
---|
185 | 178 | __io_br(); |
---|
186 | | - val = __le32_to_cpu(__raw_readl(addr)); |
---|
187 | | - __io_ar(); |
---|
| 179 | + val = __le32_to_cpu((__le32 __force)__raw_readl(addr)); |
---|
| 180 | + __io_ar(val); |
---|
188 | 181 | return val; |
---|
189 | 182 | } |
---|
190 | 183 | #endif |
---|
.. | .. |
---|
197 | 190 | u64 val; |
---|
198 | 191 | |
---|
199 | 192 | __io_br(); |
---|
200 | | - val = __le64_to_cpu(__raw_readq(addr)); |
---|
201 | | - __io_ar(); |
---|
| 193 | + val = __le64_to_cpu((__le64 __force)__raw_readq(addr)); |
---|
| 194 | + __io_ar(val); |
---|
202 | 195 | return val; |
---|
203 | 196 | } |
---|
204 | 197 | #endif |
---|
.. | .. |
---|
219 | 212 | static inline void writew(u16 value, volatile void __iomem *addr) |
---|
220 | 213 | { |
---|
221 | 214 | __io_bw(); |
---|
222 | | - __raw_writew(cpu_to_le16(value), addr); |
---|
| 215 | + __raw_writew((u16 __force)cpu_to_le16(value), addr); |
---|
223 | 216 | __io_aw(); |
---|
224 | 217 | } |
---|
225 | 218 | #endif |
---|
.. | .. |
---|
229 | 222 | static inline void writel(u32 value, volatile void __iomem *addr) |
---|
230 | 223 | { |
---|
231 | 224 | __io_bw(); |
---|
232 | | - __raw_writel(__cpu_to_le32(value), addr); |
---|
| 225 | + __raw_writel((u32 __force)__cpu_to_le32(value), addr); |
---|
233 | 226 | __io_aw(); |
---|
234 | 227 | } |
---|
235 | 228 | #endif |
---|
.. | .. |
---|
240 | 233 | static inline void writeq(u64 value, volatile void __iomem *addr) |
---|
241 | 234 | { |
---|
242 | 235 | __io_bw(); |
---|
243 | | - __raw_writeq(__cpu_to_le64(value), addr); |
---|
| 236 | + __raw_writeq((u64 __force)__cpu_to_le64(value), addr); |
---|
244 | 237 | __io_aw(); |
---|
245 | 238 | } |
---|
246 | 239 | #endif |
---|
.. | .. |
---|
455 | 448 | #define IO_SPACE_LIMIT 0xffff |
---|
456 | 449 | #endif |
---|
457 | 450 | |
---|
458 | | -#include <linux/logic_pio.h> |
---|
459 | | - |
---|
460 | 451 | /* |
---|
461 | 452 | * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be |
---|
462 | 453 | * implemented on hardware that needs an additional delay for I/O accesses to |
---|
463 | 454 | * take effect. |
---|
464 | 455 | */ |
---|
465 | 456 | |
---|
466 | | -#ifndef inb |
---|
467 | | -#define inb inb |
---|
468 | | -static inline u8 inb(unsigned long addr) |
---|
| 457 | +#if !defined(inb) && !defined(_inb) |
---|
| 458 | +#define _inb _inb |
---|
| 459 | +static inline u8 _inb(unsigned long addr) |
---|
469 | 460 | { |
---|
470 | 461 | u8 val; |
---|
471 | 462 | |
---|
472 | 463 | __io_pbr(); |
---|
473 | 464 | val = __raw_readb(PCI_IOBASE + addr); |
---|
474 | | - __io_par(); |
---|
| 465 | + __io_par(val); |
---|
475 | 466 | return val; |
---|
476 | 467 | } |
---|
477 | 468 | #endif |
---|
478 | 469 | |
---|
479 | | -#ifndef inw |
---|
480 | | -#define inw inw |
---|
481 | | -static inline u16 inw(unsigned long addr) |
---|
| 470 | +#if !defined(inw) && !defined(_inw) |
---|
| 471 | +#define _inw _inw |
---|
| 472 | +static inline u16 _inw(unsigned long addr) |
---|
482 | 473 | { |
---|
483 | 474 | u16 val; |
---|
484 | 475 | |
---|
485 | 476 | __io_pbr(); |
---|
486 | | - val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr)); |
---|
487 | | - __io_par(); |
---|
| 477 | + val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr)); |
---|
| 478 | + __io_par(val); |
---|
488 | 479 | return val; |
---|
489 | 480 | } |
---|
490 | 481 | #endif |
---|
491 | 482 | |
---|
492 | | -#ifndef inl |
---|
493 | | -#define inl inl |
---|
494 | | -static inline u32 inl(unsigned long addr) |
---|
| 483 | +#if !defined(inl) && !defined(_inl) |
---|
| 484 | +#define _inl _inl |
---|
| 485 | +static inline u32 _inl(unsigned long addr) |
---|
495 | 486 | { |
---|
496 | 487 | u32 val; |
---|
497 | 488 | |
---|
498 | 489 | __io_pbr(); |
---|
499 | | - val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr)); |
---|
500 | | - __io_par(); |
---|
| 490 | + val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr)); |
---|
| 491 | + __io_par(val); |
---|
501 | 492 | return val; |
---|
502 | 493 | } |
---|
503 | 494 | #endif |
---|
504 | 495 | |
---|
505 | | -#ifndef outb |
---|
506 | | -#define outb outb |
---|
507 | | -static inline void outb(u8 value, unsigned long addr) |
---|
| 496 | +#if !defined(outb) && !defined(_outb) |
---|
| 497 | +#define _outb _outb |
---|
| 498 | +static inline void _outb(u8 value, unsigned long addr) |
---|
508 | 499 | { |
---|
509 | 500 | __io_pbw(); |
---|
510 | 501 | __raw_writeb(value, PCI_IOBASE + addr); |
---|
.. | .. |
---|
512 | 503 | } |
---|
513 | 504 | #endif |
---|
514 | 505 | |
---|
515 | | -#ifndef outw |
---|
516 | | -#define outw outw |
---|
517 | | -static inline void outw(u16 value, unsigned long addr) |
---|
| 506 | +#if !defined(outw) && !defined(_outw) |
---|
| 507 | +#define _outw _outw |
---|
| 508 | +static inline void _outw(u16 value, unsigned long addr) |
---|
518 | 509 | { |
---|
519 | 510 | __io_pbw(); |
---|
520 | | - __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr); |
---|
| 511 | + __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr); |
---|
521 | 512 | __io_paw(); |
---|
522 | 513 | } |
---|
523 | 514 | #endif |
---|
524 | 515 | |
---|
525 | | -#ifndef outl |
---|
526 | | -#define outl outl |
---|
527 | | -static inline void outl(u32 value, unsigned long addr) |
---|
| 516 | +#if !defined(outl) && !defined(_outl) |
---|
| 517 | +#define _outl _outl |
---|
| 518 | +static inline void _outl(u32 value, unsigned long addr) |
---|
528 | 519 | { |
---|
529 | 520 | __io_pbw(); |
---|
530 | | - __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr); |
---|
| 521 | + __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr); |
---|
531 | 522 | __io_paw(); |
---|
532 | 523 | } |
---|
| 524 | +#endif |
---|
| 525 | + |
---|
| 526 | +#include <linux/logic_pio.h> |
---|
| 527 | + |
---|
| 528 | +#ifndef inb |
---|
| 529 | +#define inb _inb |
---|
| 530 | +#endif |
---|
| 531 | + |
---|
| 532 | +#ifndef inw |
---|
| 533 | +#define inw _inw |
---|
| 534 | +#endif |
---|
| 535 | + |
---|
| 536 | +#ifndef inl |
---|
| 537 | +#define inl _inl |
---|
| 538 | +#endif |
---|
| 539 | + |
---|
| 540 | +#ifndef outb |
---|
| 541 | +#define outb _outb |
---|
| 542 | +#endif |
---|
| 543 | + |
---|
| 544 | +#ifndef outw |
---|
| 545 | +#define outw _outw |
---|
| 546 | +#endif |
---|
| 547 | + |
---|
| 548 | +#ifndef outl |
---|
| 549 | +#define outl _outl |
---|
533 | 550 | #endif |
---|
534 | 551 | |
---|
535 | 552 | #ifndef inb_p |
---|
.. | .. |
---|
894 | 911 | #include <linux/vmalloc.h> |
---|
895 | 912 | #define __io_virt(x) ((void __force *)(x)) |
---|
896 | 913 | |
---|
897 | | -#ifndef CONFIG_GENERIC_IOMAP |
---|
898 | | -struct pci_dev; |
---|
899 | | -extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); |
---|
900 | | - |
---|
901 | | -#ifndef pci_iounmap |
---|
902 | | -#define pci_iounmap pci_iounmap |
---|
903 | | -static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) |
---|
904 | | -{ |
---|
905 | | -} |
---|
906 | | -#endif |
---|
907 | | -#endif /* CONFIG_GENERIC_IOMAP */ |
---|
908 | | - |
---|
909 | 914 | /* |
---|
910 | 915 | * Change virtual addresses to physical addresses and vv. |
---|
911 | 916 | * These are pretty trivial |
---|
.. | .. |
---|
929 | 934 | /** |
---|
930 | 935 | * DOC: ioremap() and ioremap_*() variants |
---|
931 | 936 | * |
---|
932 | | - * If you have an IOMMU your architecture is expected to have both ioremap() |
---|
933 | | - * and iounmap() implemented otherwise the asm-generic helpers will provide a |
---|
934 | | - * direct mapping. |
---|
| 937 | + * Architectures with an MMU are expected to provide ioremap() and iounmap() |
---|
| 938 | + * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide |
---|
| 939 | + * a default no-op implementation that expects that the physical addresses used |
---|
| 940 | + * for MMIO are already marked as uncached, and can be used as kernel virtual |
---|
| 941 | + * addresses. |
---|
935 | 942 | * |
---|
936 | | - * There are ioremap_*() call variants, if you have no IOMMU we naturally will |
---|
937 | | - * default to direct mapping for all of them, you can override these defaults. |
---|
938 | | - * If you have an IOMMU you are highly encouraged to provide your own |
---|
939 | | - * ioremap variant implementation as there currently is no safe architecture |
---|
940 | | - * agnostic default. To avoid possible improper behaviour default asm-generic |
---|
941 | | - * ioremap_*() variants all return NULL when an IOMMU is available. If you've |
---|
942 | | - * defined your own ioremap_*() variant you must then declare your own |
---|
943 | | - * ioremap_*() variant as defined to itself to avoid the default NULL return. |
---|
| 943 | + * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes |
---|
| 944 | + * for specific drivers if the architecture chooses to implement them. If they |
---|
| 945 | + * are not implemented we fall back to plain ioremap. |
---|
944 | 946 | */ |
---|
945 | | - |
---|
946 | | -#ifdef CONFIG_MMU |
---|
947 | | - |
---|
948 | | -#ifndef ioremap_uc |
---|
949 | | -#define ioremap_uc ioremap_uc |
---|
950 | | -static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size) |
---|
951 | | -{ |
---|
952 | | - return NULL; |
---|
953 | | -} |
---|
954 | | -#endif |
---|
955 | | - |
---|
956 | | -#else /* !CONFIG_MMU */ |
---|
957 | | - |
---|
958 | | -/* |
---|
959 | | - * Change "struct page" to physical address. |
---|
960 | | - * |
---|
961 | | - * This implementation is for the no-MMU case only... if you have an MMU |
---|
962 | | - * you'll need to provide your own definitions. |
---|
963 | | - */ |
---|
964 | | - |
---|
| 947 | +#ifndef CONFIG_MMU |
---|
965 | 948 | #ifndef ioremap |
---|
966 | 949 | #define ioremap ioremap |
---|
967 | 950 | static inline void __iomem *ioremap(phys_addr_t offset, size_t size) |
---|
.. | .. |
---|
970 | 953 | } |
---|
971 | 954 | #endif |
---|
972 | 955 | |
---|
973 | | -#ifndef __ioremap |
---|
974 | | -#define __ioremap __ioremap |
---|
975 | | -static inline void __iomem *__ioremap(phys_addr_t offset, size_t size, |
---|
976 | | - unsigned long flags) |
---|
977 | | -{ |
---|
978 | | - return ioremap(offset, size); |
---|
979 | | -} |
---|
980 | | -#endif |
---|
981 | | - |
---|
982 | 956 | #ifndef iounmap |
---|
983 | 957 | #define iounmap iounmap |
---|
984 | | - |
---|
985 | 958 | static inline void iounmap(void __iomem *addr) |
---|
986 | 959 | { |
---|
987 | 960 | } |
---|
988 | 961 | #endif |
---|
989 | | -#endif /* CONFIG_MMU */ |
---|
990 | | -#ifndef ioremap_nocache |
---|
991 | | -void __iomem *ioremap(phys_addr_t phys_addr, size_t size); |
---|
992 | | -#define ioremap_nocache ioremap_nocache |
---|
993 | | -static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size) |
---|
| 962 | +#elif defined(CONFIG_GENERIC_IOREMAP) |
---|
| 963 | +#include <linux/pgtable.h> |
---|
| 964 | + |
---|
| 965 | +void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot); |
---|
| 966 | +void iounmap(volatile void __iomem *addr); |
---|
| 967 | + |
---|
| 968 | +static inline void __iomem *ioremap(phys_addr_t addr, size_t size) |
---|
994 | 969 | { |
---|
995 | | - return ioremap(offset, size); |
---|
| 970 | + /* _PAGE_IOREMAP needs to be supplied by the architecture */ |
---|
| 971 | + return ioremap_prot(addr, size, _PAGE_IOREMAP); |
---|
996 | 972 | } |
---|
| 973 | +#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */ |
---|
| 974 | + |
---|
| 975 | +#ifndef ioremap_wc |
---|
| 976 | +#define ioremap_wc ioremap |
---|
997 | 977 | #endif |
---|
998 | 978 | |
---|
| 979 | +#ifndef ioremap_wt |
---|
| 980 | +#define ioremap_wt ioremap |
---|
| 981 | +#endif |
---|
| 982 | + |
---|
| 983 | +/* |
---|
| 984 | + * ioremap_uc is special in that we do require an explicit architecture |
---|
| 985 | + * implementation. In general you do not want to use this function in a |
---|
| 986 | + * driver and use plain ioremap, which is uncached by default. Similarly |
---|
| 987 | + * architectures should not implement it unless they have a very good |
---|
| 988 | + * reason. |
---|
| 989 | + */ |
---|
999 | 990 | #ifndef ioremap_uc |
---|
1000 | 991 | #define ioremap_uc ioremap_uc |
---|
1001 | 992 | static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size) |
---|
1002 | 993 | { |
---|
1003 | | - return ioremap_nocache(offset, size); |
---|
1004 | | -} |
---|
1005 | | -#endif |
---|
1006 | | - |
---|
1007 | | -#ifndef ioremap_wc |
---|
1008 | | -#define ioremap_wc ioremap_wc |
---|
1009 | | -static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size) |
---|
1010 | | -{ |
---|
1011 | | - return ioremap_nocache(offset, size); |
---|
1012 | | -} |
---|
1013 | | -#endif |
---|
1014 | | - |
---|
1015 | | -#ifndef ioremap_wt |
---|
1016 | | -#define ioremap_wt ioremap_wt |
---|
1017 | | -static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size) |
---|
1018 | | -{ |
---|
1019 | | - return ioremap_nocache(offset, size); |
---|
| 994 | + return NULL; |
---|
1020 | 995 | } |
---|
1021 | 996 | #endif |
---|
1022 | 997 | |
---|
.. | .. |
---|
1028 | 1003 | { |
---|
1029 | 1004 | port &= IO_SPACE_LIMIT; |
---|
1030 | 1005 | return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port; |
---|
| 1006 | +} |
---|
| 1007 | +#define __pci_ioport_unmap __pci_ioport_unmap |
---|
| 1008 | +static inline void __pci_ioport_unmap(void __iomem *p) |
---|
| 1009 | +{ |
---|
| 1010 | + uintptr_t start = (uintptr_t) PCI_IOBASE; |
---|
| 1011 | + uintptr_t addr = (uintptr_t) p; |
---|
| 1012 | + |
---|
| 1013 | + if (addr >= start && addr < start + IO_SPACE_LIMIT) |
---|
| 1014 | + return; |
---|
| 1015 | + iounmap(p); |
---|
1031 | 1016 | } |
---|
1032 | 1017 | #endif |
---|
1033 | 1018 | |
---|
.. | .. |
---|
1043 | 1028 | #endif /* CONFIG_GENERIC_IOMAP */ |
---|
1044 | 1029 | #endif /* CONFIG_HAS_IOPORT_MAP */ |
---|
1045 | 1030 | |
---|
| 1031 | +#ifndef CONFIG_GENERIC_IOMAP |
---|
| 1032 | +struct pci_dev; |
---|
| 1033 | +extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); |
---|
| 1034 | + |
---|
| 1035 | +#ifndef __pci_ioport_unmap |
---|
| 1036 | +static inline void __pci_ioport_unmap(void __iomem *p) {} |
---|
| 1037 | +#endif |
---|
| 1038 | + |
---|
| 1039 | +#ifndef pci_iounmap |
---|
| 1040 | +#define pci_iounmap pci_iounmap |
---|
| 1041 | +static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) |
---|
| 1042 | +{ |
---|
| 1043 | + __pci_ioport_unmap(p); |
---|
| 1044 | +} |
---|
| 1045 | +#endif |
---|
| 1046 | +#endif /* CONFIG_GENERIC_IOMAP */ |
---|
| 1047 | + |
---|
1046 | 1048 | /* |
---|
1047 | 1049 | * Convert a virtual cached pointer to an uncached pointer |
---|
1048 | 1050 | */ |
---|