From 95099d4622f8cb224d94e314c7a8e0df60b13f87 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 08:38:01 +0000
Subject: [PATCH] enable docker ppp
---
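Note (illustrative, not part of the applied diff): the new ppc_after_tlbiel_barrier() helper is meant to close a tlbiel sequence where callers today issue a bare ptesync, so that ISA v3.1 (CPU_FTR_ARCH_31) parts also get the cp_abort described in the comment below. The sketch that follows is only an assumed caller shape in the style of the flush loops in arch/powerpc/mm/book3s64/radix_tlb.c; example_tlbiel_flush_all_sets() and tlbiel_flush_set() are placeholders and do not exist in this patch.

/*
 * Illustrative caller, not part of this patch. Only
 * ppc_after_tlbiel_barrier() comes from the diff below;
 * tlbiel_flush_set() stands in for the usual PPC_TLBIEL asm.
 */
static void tlbiel_flush_set(unsigned int set, unsigned int is);	/* placeholder */

static void example_tlbiel_flush_all_sets(unsigned int num_sets, unsigned int is)
{
	unsigned int set;

	/* Order prior PTE updates before the invalidations. */
	asm volatile("ptesync" : : : "memory");

	for (set = 0; set < num_sets; set++)
		tlbiel_flush_set(set, is);

	/*
	 * Previously this would be a bare ptesync; the helper also issues
	 * cp_abort on CPU_FTR_ARCH_31 so a stale copy buffer cannot be
	 * pasted after its translation has been invalidated.
	 */
	ppc_after_tlbiel_barrier();
}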
kernel/arch/powerpc/include/asm/synch.h | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)
diff --git a/kernel/arch/powerpc/include/asm/synch.h b/kernel/arch/powerpc/include/asm/synch.h
index aca70fb..1d67bc8 100644
--- a/kernel/arch/powerpc/include/asm/synch.h
+++ b/kernel/arch/powerpc/include/asm/synch.h
@@ -3,8 +3,9 @@
 #define _ASM_POWERPC_SYNCH_H
 #ifdef __KERNEL__
 
+#include <asm/cputable.h>
 #include <asm/feature-fixups.h>
-#include <asm/asm-const.h>
+#include <asm/ppc-opcode.h>
 
 #ifndef __ASSEMBLY__
 extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
@@ -20,6 +21,22 @@
 {
 	__asm__ __volatile__ ("isync" : : : "memory");
 }
+
+static inline void ppc_after_tlbiel_barrier(void)
+{
+	asm volatile("ptesync": : :"memory");
+	/*
+	 * POWER9, POWER10 need a cp_abort after tlbiel to ensure the copy is
+	 * invalidated correctly. If this is not done, the paste can take data
+	 * from the physical address that was translated at copy time.
+	 *
+	 * POWER9 in practice does not need this, because address spaces with
+	 * accelerators mapped will use tlbie (which does invalidate the copy)
+	 * to invalidate translations. It's not possible to limit POWER10 this
+	 * way due to local copy-paste.
+	 */
+	asm volatile(ASM_FTR_IFSET(PPC_CP_ABORT, "", %0) : : "i" (CPU_FTR_ARCH_31) : "memory");
+}
 #endif /* __ASSEMBLY__ */
 
 #if defined(__powerpc64__)
--
Gitblit v1.6.2