2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/arch/powerpc/include/asm/synch.h
@@ -3,8 +3,9 @@
 #define _ASM_POWERPC_SYNCH_H
 #ifdef __KERNEL__
 
+#include <asm/cputable.h>
 #include <asm/feature-fixups.h>
-#include <asm/asm-const.h>
+#include <asm/ppc-opcode.h>
 
 #ifndef __ASSEMBLY__
 extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
@@ -20,6 +21,22 @@
 {
 	__asm__ __volatile__ ("isync" : : : "memory");
 }
+
+static inline void ppc_after_tlbiel_barrier(void)
+{
+	asm volatile("ptesync": : :"memory");
+	/*
+	 * POWER9, POWER10 need a cp_abort after tlbiel to ensure the copy is
+	 * invalidated correctly. If this is not done, the paste can take data
+	 * from the physical address that was translated at copy time.
+	 *
+	 * POWER9 in practice does not need this, because address spaces with
+	 * accelerators mapped will use tlbie (which does invalidate the copy)
+	 * to invalidate translations. It's not possible to limit POWER10 this
+	 * way due to local copy-paste.
+	 */
+	asm volatile(ASM_FTR_IFSET(PPC_CP_ABORT, "", %0) : : "i" (CPU_FTR_ARCH_31) : "memory");
+}
 #endif /* __ASSEMBLY__ */
 
 #if defined(__powerpc64__)
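
For context, a minimal sketch of how a caller might use the new helper: a local (tlbiel-based) TLB flush sequence that previously ended in a bare ptesync would now end with ppc_after_tlbiel_barrier(), so that CPU_FTR_ARCH_31 (Power10) parts also get the cp_abort. The loop shape and the tlbiel_flush_set() helper below are illustrative assumptions, not part of this patch:

/*
 * Illustrative sketch only. tlbiel_flush_set() stands in for a
 * hypothetical per-set tlbiel helper; the real callers live in the
 * powerpc TLB flush code (e.g. the radix tlbiel loops).
 */
static void example_tlbiel_all(unsigned int num_sets)
{
	unsigned int set;

	/* Order prior PTE updates before the invalidations. */
	asm volatile("ptesync" : : : "memory");

	for (set = 0; set < num_sets; set++)
		tlbiel_flush_set(set);	/* hypothetical per-set tlbiel */

	/*
	 * ptesync to complete the tlbiel sequence, plus cp_abort on
	 * ISA v3.1 CPUs so a stale copy-paste buffer cannot paste data
	 * from a translation that existed before the flush.
	 */
	ppc_after_tlbiel_barrier();
}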