From 95099d4622f8cb224d94e314c7a8e0df60b13f87 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 08:38:01 +0000
Subject: [PATCH] enable docker ppp
---
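Notes (below the "---" marker, so not picked up by git am): the hunks add an early trapped-I/O lookup and a 29-bit segment fast path to the SH ioremap path in kernel/arch/sh/mm/ioremap.c, and rename __iounmap()/EXPORT_SYMBOL(__iounmap) to iounmap(). As a rough, userspace-only sketch of what the 29-bit fast path computes, the program below ORs a physical offset into the cached (P1) and uncached (P2) identity-mapped segments; the mask and segment bases are assumed illustrative values, not taken from this patch.

/*
 * Illustration only, not part of the patch: a standalone sketch of the
 * segment arithmetic behind the 29-bit fast path. In 29-bit mode the
 * whole physical space is identity-mapped, so "remapping" is just a
 * matter of placing the offset in the cached (P1) or uncached (P2)
 * segment. The mask and segment bases are assumed values for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PHYS_MASK  0x1fffffffUL	/* assumed 29-bit physical mask */
#define DEMO_P1SEG_BASE 0x80000000UL	/* assumed cached segment base */
#define DEMO_P2SEG_BASE 0xa0000000UL	/* assumed uncached segment base */

static uint32_t demo_p1segaddr(uint32_t phys)
{
	return (phys & DEMO_PHYS_MASK) | DEMO_P1SEG_BASE;
}

static uint32_t demo_p2segaddr(uint32_t phys)
{
	return (phys & DEMO_PHYS_MASK) | DEMO_P2SEG_BASE;
}

int main(void)
{
	uint32_t phys = 0x1f000000;	/* hypothetical device register block */

	printf("phys 0x%08x -> cached 0x%08x, uncached 0x%08x\n",
	       (unsigned int)phys,
	       (unsigned int)demo_p1segaddr(phys),
	       (unsigned int)demo_p2segaddr(phys));
	return 0;
}

In the patch itself, the cached/uncached choice is driven by _PAGE_CACHABLE in the requested pgprot, and anything carrying legacy PTEA attributes (_PAGE_PCC_MASK) falls back to page-table mapping.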
kernel/arch/sh/mm/ioremap.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 57 insertions(+), 2 deletions(-)
diff --git a/kernel/arch/sh/mm/ioremap.c b/kernel/arch/sh/mm/ioremap.c
index d09ddfe..2134258 100644
--- a/kernel/arch/sh/mm/ioremap.c
+++ b/kernel/arch/sh/mm/ioremap.c
@@ -18,12 +18,59 @@
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
+#include <asm/io_trapped.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>
+#include "ioremap.h"
+
+/*
+ * On 32-bit SH, we traditionally have the whole physical address space mapped
+ * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to do
+ * anything but place the address in the proper segment. This is true for P1
+ * and P2 addresses, as well as some P3 ones. However, most of the P3 addresses
+ * and newer cores using extended addressing need to map through page tables, so
+ * the ioremap() implementation becomes a bit more complicated.
+ */
+#ifdef CONFIG_29BIT
+static void __iomem *
+__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
+{
+	phys_addr_t last_addr = offset + size - 1;
+
+	/*
+	 * For P1 and P2 space this is trivial, as everything is already
+	 * mapped. Uncached access for P1 addresses is done through P2.
+	 * In the P3 case or for addresses outside of the 29-bit space,
+	 * mapping must be done by the PMB or by using page tables.
+	 */
+	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
+		u64 flags = pgprot_val(prot);
+
+		/*
+		 * Anything using the legacy PTEA space attributes needs
+		 * to be kicked down to page table mappings.
+		 */
+		if (unlikely(flags & _PAGE_PCC_MASK))
+			return NULL;
+		if (unlikely(flags & _PAGE_CACHABLE))
+			return (void __iomem *)P1SEGADDR(offset);
+
+		return (void __iomem *)P2SEGADDR(offset);
+	}
+
+	/* P4 above the store queues is always mapped. */
+	if (unlikely(offset >= P3_ADDR_MAX))
+		return (void __iomem *)P4SEGADDR(offset);
+
+	return NULL;
+}
+#else
+#define __ioremap_29bit(offset, size, prot) NULL
+#endif /* CONFIG_29BIT */

/*
* Remap an arbitrary physical address space into the kernel virtual
@@ -41,6 +88,14 @@
struct vm_struct *area;
unsigned long offset, last_addr, addr, orig_addr;
void __iomem *mapped;
+
+	mapped = __ioremap_trapped(phys_addr, size);
+	if (mapped)
+		return mapped;
+
+	mapped = __ioremap_29bit(phys_addr, size, pgprot);
+	if (mapped)
+		return mapped;

/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
@@ -103,7 +158,7 @@
return 0;
}

-void __iounmap(void __iomem *addr)
+void iounmap(void __iomem *addr)
{
unsigned long vaddr = (unsigned long __force)addr;
struct vm_struct *p;
@@ -134,4 +189,4 @@

kfree(p);
}
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(iounmap);
--
Gitblit v1.6.2
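Caller-visible effect, sketched below for reference: mappings are still obtained with ioremap(), but they are now released with iounmap() instead of __iounmap(). The device base, size and register offset in this fragment are hypothetical, and it is a sketch of typical driver usage rather than code from this tree.

/* Illustration only: hypothetical driver fragment using the renamed API. */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/io.h>

#define DEMO_PHYS_BASE	0x1f000000	/* hypothetical device base */
#define DEMO_REG_ID	0x00		/* hypothetical ID register */

static int demo_read_id(u32 *id)
{
	void __iomem *regs;

	/* On 29-bit parts this can take the segment fast path added above. */
	regs = ioremap(DEMO_PHYS_BASE, 0x100);
	if (!regs)
		return -ENOMEM;

	*id = readl(regs + DEMO_REG_ID);

	iounmap(regs);		/* was __iounmap() before this patch */
	return 0;
}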