From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10: restore non-RT highmem implementation

Drop the highmem-internal.h/kmap_local() based interface carried by the
RT patch set and restore the stock 5.10 inline kmap()/kmap_atomic()
implementation in highmem.h.
---
 kernel/include/linux/highmem.h | 306 ++++++++++++++++++++++++-----------------
 1 file changed, 193 insertions(+), 113 deletions(-)

diff --git a/kernel/include/linux/highmem.h b/kernel/include/linux/highmem.h
index be24378..220e92c 100644
--- a/kernel/include/linux/highmem.h
+++ b/kernel/include/linux/highmem.h
@@ -11,119 +11,6 @@
 
 #include <asm/cacheflush.h>
 
-#include "highmem-internal.h"
-
-/**
- * kmap - Map a page for long term usage
- * @page:	Pointer to the page to be mapped
- *
- * Returns: The virtual address of the mapping
- *
- * Can only be invoked from preemptible task context because on 32bit
- * systems with CONFIG_HIGHMEM enabled this function might sleep.
- *
- * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
- * this returns the virtual address of the direct kernel mapping.
- *
- * The returned virtual address is globally visible and valid up to the
- * point where it is unmapped via kunmap(). The pointer can be handed to
- * other contexts.
- *
- * For highmem pages on 32bit systems this can be slow as the mapping space
- * is limited and protected by a global lock. In case that there is no
- * mapping slot available the function blocks until a slot is released via
- * kunmap().
- */
-static inline void *kmap(struct page *page);
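
As a usage illustration (not part of this patch): a minimal sketch of the
long-term mapping pattern described above, in task context. checksum_page()
is a hypothetical helper.

	#include <linux/highmem.h>
	#include <linux/types.h>

	/* Hypothetical helper: checksum a page that may live in highmem. */
	static u32 checksum_page(struct page *page)
	{
		u8 *vaddr = kmap(page);	/* may sleep waiting for a mapping slot */
		u32 sum = 0;
		unsigned int i;

		for (i = 0; i < PAGE_SIZE; i++)
			sum += vaddr[i];

		kunmap(page);		/* release the global mapping slot */
		return sum;
	}
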
-
-/**
- * kunmap - Unmap the virtual address mapped by kmap()
- * @page:	Pointer to the page which was mapped by kmap()
- *
- * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
- * pages in the low memory area.
- */
-static inline void kunmap(struct page *page);
-
-/**
- * kmap_to_page - Get the page for a kmap'ed address
- * @addr:	The address to look up
- *
- * Returns: The page which is mapped to @addr.
- */
-static inline struct page *kmap_to_page(void *addr);
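
A quick sanity sketch of the lookup, assuming the address came from a live
kmap(); kmap_round_trip() is a hypothetical name.

	#include <linux/highmem.h>

	/* Hypothetical: kmap_to_page() inverts a live kmap() mapping. */
	static void kmap_round_trip(struct page *page)
	{
		void *addr = kmap(page);

		WARN_ON(kmap_to_page(addr) != page);
		kunmap(page);
	}
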
-
-/**
- * kmap_flush_unused - Flush all unused kmap mappings in order to
- *		       remove stray mappings
- */
-static inline void kmap_flush_unused(void);
-
-/**
- * kmap_local_page - Map a page for temporary usage
- * @page:	Pointer to the page to be mapped
- *
- * Returns: The virtual address of the mapping
- *
- * Can be invoked from any context.
- *
- * Requires careful handling when nesting multiple mappings because the map
- * management is stack based. The unmap has to be in the reverse order of
- * the map operation:
- *
- * addr1 = kmap_local_page(page1);
- * addr2 = kmap_local_page(page2);
- * ...
- * kunmap_local(addr2);
- * kunmap_local(addr1);
- *
- * Unmapping addr1 before addr2 is invalid and causes malfunction.
- *
- * Contrary to kmap() mappings the mapping is only valid in the context of
- * the caller and cannot be handed to other contexts.
- *
- * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
- * virtual address of the direct mapping. Only real highmem pages are
- * temporarily mapped.
- *
- * While it is significantly faster than kmap() for the highmem case it
- * comes with restrictions about the pointer validity. Only use when really
- * necessary.
- *
- * On HIGHMEM enabled systems mapping a highmem page has the side effect of
- * disabling migration in order to keep the virtual address stable across
- * preemption. No caller of kmap_local_page() can rely on this side effect.
- */
-static inline void *kmap_local_page(struct page *page);
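
To make the LIFO rule concrete, a short sketch copying between two
possibly-highmem pages with correctly nested local mappings;
copy_page_data() is a hypothetical name (the in-tree copy_highpage() does
essentially this in newer trees).

	#include <linux/highmem.h>
	#include <linux/string.h>

	static void copy_page_data(struct page *dst, struct page *src)
	{
		void *d = kmap_local_page(dst);
		void *s = kmap_local_page(src);

		memcpy(d, s, PAGE_SIZE);

		/* Unmap strictly in reverse order of the maps above. */
		kunmap_local(s);
		kunmap_local(d);
	}
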
-
-/**
- * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
- * @page:	Pointer to the page to be mapped
- *
- * Returns: The virtual address of the mapping
- *
- * Effectively a wrapper around kmap_local_page() which disables pagefaults
- * and preemption.
- *
- * Do not use in new code. Use kmap_local_page() instead.
- */
-static inline void *kmap_atomic(struct page *page);
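
For reference, the deprecated pattern next to its suggested replacement;
the clear_tail_*() helpers are hypothetical and just zero the tail of a
page.

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Deprecated style: implicitly disables pagefaults and preemption. */
	static void clear_tail_atomic(struct page *page, size_t off)
	{
		char *kaddr = kmap_atomic(page);

		memset(kaddr + off, 0, PAGE_SIZE - off);
		kunmap_atomic(kaddr);
	}

	/* Preferred in new code: same effect, no implicit side effects. */
	static void clear_tail_local(struct page *page, size_t off)
	{
		char *kaddr = kmap_local_page(page);

		memset(kaddr + off, 0, PAGE_SIZE - off);
		kunmap_local(kaddr);
	}
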
-
-/**
- * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
- * @addr:	Virtual address to be unmapped
- *
- * Counterpart to kmap_atomic().
- *
- * Effectively a wrapper around kunmap_local() which additionally undoes
- * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
- * preemption.
- */
-
-/* Highmem related interfaces for management code */
-static inline unsigned int nr_free_highpages(void);
-static inline unsigned long totalhigh_pages(void);
-
 #ifndef ARCH_HAS_FLUSH_ANON_PAGE
 static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 {
@@ -142,6 +29,199 @@
 }
 #endif
 
+#include <asm/kmap_types.h>
+
+#ifdef CONFIG_HIGHMEM
+extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+extern void kunmap_atomic_high(void *kvaddr);
+#include <asm/highmem.h>
+
+#ifndef ARCH_HAS_KMAP_FLUSH_TLB
+static inline void kmap_flush_tlb(unsigned long addr) { }
+#endif
+
+#ifndef kmap_prot
+#define kmap_prot PAGE_KERNEL
+#endif
+
+void *kmap_high(struct page *page);
+static inline void *kmap(struct page *page)
+{
+	void *addr;
+
+	might_sleep();
+	if (!PageHighMem(page))
+		addr = page_address(page);
+	else
+		addr = kmap_high(page);
+	kmap_flush_tlb((unsigned long)addr);
+	return addr;
+}
+
+void kunmap_high(struct page *page);
+
+static inline void kunmap(struct page *page)
+{
+	might_sleep();
+	if (!PageHighMem(page))
+		return;
+	kunmap_high(page);
+}
+
+/*
+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+ * no global lock is needed and because the kmap code must perform a global TLB
+ * invalidation when the kmap pool wraps.
+ *
+ * However when holding an atomic kmap it is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
+ *
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+	preempt_disable();
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+	return kmap_atomic_high_prot(page, prot);
+}
+#define kmap_atomic(page)	kmap_atomic_prot(page, kmap_prot)
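
Illustrating the comment above: a minimal sketch of the one remaining
legitimate use, touching a page from a context that cannot sleep (an IRQ
handler, or under a spinlock); fill_status_byte() is hypothetical.

	#include <linux/highmem.h>
	#include <linux/types.h>

	static void fill_status_byte(struct page *page, unsigned int off, u8 val)
	{
		char *kaddr;

		/* Cannot sleep here, so kmap() is off limits; kmap_atomic() is fine. */
		kaddr = kmap_atomic(page);
		kaddr[off] = val;
		kunmap_atomic(kaddr);
	}
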
+
+/* declarations for linux/mm/highmem.c */
+unsigned int nr_free_highpages(void);
+extern atomic_long_t _totalhigh_pages;
+static inline unsigned long totalhigh_pages(void)
+{
+	return (unsigned long)atomic_long_read(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_inc(void)
+{
+	atomic_long_inc(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_dec(void)
+{
+	atomic_long_dec(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_add(long count)
+{
+	atomic_long_add(count, &_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_set(long val)
+{
+	atomic_long_set(&_totalhigh_pages, val);
+}
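
These wrappers let arch init and memory-hotplug code maintain the global
highmem count through a single atomic. A hypothetical diagnostic reading
both counters declared here:

	#include <linux/highmem.h>
	#include <linux/printk.h>

	static void report_highmem(void)
	{
		pr_info("highmem: %lu total pages, %u free\n",
			totalhigh_pages(), nr_free_highpages());
	}
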
+
+void kmap_flush_unused(void);
+
+struct page *kmap_to_page(void *addr);
+
+#else /* CONFIG_HIGHMEM */
+
+static inline unsigned int nr_free_highpages(void) { return 0; }
+
+static inline struct page *kmap_to_page(void *addr)
+{
+	return virt_to_page(addr);
+}
+
+static inline unsigned long totalhigh_pages(void) { return 0UL; }
+
+static inline void *kmap(struct page *page)
+{
+	might_sleep();
+	return page_address(page);
+}
+
+static inline void kunmap_high(struct page *page)
+{
+}
+
+static inline void kunmap(struct page *page)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(page_address(page));
+#endif
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+	preempt_disable();
+	pagefault_disable();
+	return page_address(page);
+}
+#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
+
+static inline void kunmap_atomic_high(void *addr)
+{
+	/*
+	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
+	 * handles re-enabling faults + preemption
+	 */
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(addr);
+#endif
+}
+
+#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
+
+#define kmap_flush_unused()	do {} while(0)
+
+#endif /* CONFIG_HIGHMEM */
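
Worth noting about the #else branch above: with CONFIG_HIGHMEM=n every page
sits in the direct map, so the whole API collapses to page_address() plus
the preempt/pagefault bookkeeping. A hypothetical compile-anywhere check:

	#include <linux/highmem.h>

	static void show_identity(struct page *page)
	{
		void *a = page_address(page);
		void *b = kmap(page);		/* might_sleep() check, then direct map */
		void *c = kmap_atomic(page);	/* adds preempt/pagefault disable */

		WARN_ON(a != b || b != c);

		kunmap_atomic(c);
		kunmap(page);
	}
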
+
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+
+DECLARE_PER_CPU(int, __kmap_atomic_idx);
+
+static inline int kmap_atomic_idx_push(void)
+{
+	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+	WARN_ON_ONCE(in_irq() && !irqs_disabled());
+	BUG_ON(idx >= KM_TYPE_NR);
+#endif
+	return idx;
+}
+
+static inline int kmap_atomic_idx(void)
+{
+	return __this_cpu_read(__kmap_atomic_idx) - 1;
+}
+
+static inline void kmap_atomic_idx_pop(void)
+{
+#ifdef CONFIG_DEBUG_HIGHMEM
+	int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+
+	BUG_ON(idx < 0);
+#else
+	__this_cpu_dec(__kmap_atomic_idx);
+#endif
+}
+
+#endif
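
The push/pop pair above implements a small per-CPU stack of fixmap slots.
Roughly how an architecture's kmap_atomic_high_prot() consumes it,
simplified from the x86_32 pattern (kmap_pte and FIX_KMAP_BEGIN are
arch-provided; error checking omitted):

	/* Simplified sketch, modeled on arch/x86/mm/highmem_32.c. */
	void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
	{
		unsigned long vaddr;
		int idx, type;

		type = kmap_atomic_idx_push();	/* grab the next per-CPU slot */
		idx = type + KM_TYPE_NR * smp_processor_id();
		vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		set_pte(kmap_pte - idx, mk_pte(page, prot));

		return (void *)vaddr;
	}
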
+
+/*
+ * Prevent people trying to call kunmap_atomic() as if it were kunmap()
+ * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+ */
+#define kunmap_atomic(addr)                                     \
+do {                                                            \
+	BUILD_BUG_ON(__same_type((addr), struct page *));       \
+	kunmap_atomic_high(addr);                               \
+	pagefault_enable();                                     \
+	preempt_enable();                                       \
+} while (0)
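
The BUILD_BUG_ON() turns the classic mistake of passing the struct page
instead of the returned address into a compile-time error:

	#include <linux/highmem.h>

	static void touch_page(struct page *page)
	{
		char *kaddr = kmap_atomic(page);

		kaddr[0] = 0;
		kunmap_atomic(kaddr);	/* kunmap_atomic(page) would fail to build */
	}
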
+
+
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)

--
Gitblit v1.6.2