From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10 no rt

---
 kernel/mm/highmem.c | 262 +++-----------------------------------------------
 1 file changed, 15 insertions(+), 247 deletions(-)

diff --git a/kernel/mm/highmem.c b/kernel/mm/highmem.c
index 72b9a2d..1352a27 100644
--- a/kernel/mm/highmem.c
+++ b/kernel/mm/highmem.c
@@ -31,6 +31,10 @@
 #include <asm/tlbflush.h>
 #include <linux/vmalloc.h>
 
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+DEFINE_PER_CPU(int, __kmap_atomic_idx);
+#endif
+
 /*
  * Virtual_count is not a pure "count".
  *   0 means that it is not mapped, and has not been mapped
@@ -104,7 +108,9 @@
 atomic_long_t _totalhigh_pages __read_mostly;
 EXPORT_SYMBOL(_totalhigh_pages);
 
-unsigned int __nr_free_highpages (void)
+EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
+
+unsigned int nr_free_highpages (void)
 {
 	struct zone *zone;
 	unsigned int pages = 0;
@@ -141,7 +147,7 @@
 	do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
 #endif
 
-struct page *__kmap_to_page(void *vaddr)
+struct page *kmap_to_page(void *vaddr)
 {
 	unsigned long addr = (unsigned long)vaddr;
 
@@ -152,7 +158,7 @@
 
 	return virt_to_page(addr);
 }
-EXPORT_SYMBOL(__kmap_to_page);
+EXPORT_SYMBOL(kmap_to_page);
 
 static void flush_all_zero_pkmaps(void)
 {
@@ -194,7 +200,10 @@
 	flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
 }
 
-void __kmap_flush_unused(void)
+/**
+ * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
+ */
+void kmap_flush_unused(void)
 {
 	lock_kmap();
 	flush_all_zero_pkmaps();
@@ -358,250 +367,9 @@
 	if (need_wakeup)
 		wake_up(pkmap_map_wait);
 }
+
 EXPORT_SYMBOL(kunmap_high);
-#endif /* CONFIG_HIGHMEM */
-
-#ifdef CONFIG_KMAP_LOCAL
-
-#include <asm/kmap_size.h>
-
-/*
- * With DEBUG_HIGHMEM the stack depth is doubled and every second
- * slot is unused which acts as a guard page
- */
-#ifdef CONFIG_DEBUG_HIGHMEM
-# define KM_INCR	2
-#else
-# define KM_INCR	1
-#endif
-
-static inline int kmap_local_idx_push(void)
-{
-	WARN_ON_ONCE(in_irq() && !irqs_disabled());
-	current->kmap_ctrl.idx += KM_INCR;
-	BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
-	return current->kmap_ctrl.idx - 1;
-}
-
-static inline int kmap_local_idx(void)
-{
-	return current->kmap_ctrl.idx - 1;
-}
-
-static inline void kmap_local_idx_pop(void)
-{
-	current->kmap_ctrl.idx -= KM_INCR;
-	BUG_ON(current->kmap_ctrl.idx < 0);
-}
-
-#ifndef arch_kmap_local_post_map
-# define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
-#endif
-
-#ifndef arch_kmap_local_pre_unmap
-# define arch_kmap_local_pre_unmap(vaddr)	do { } while (0)
-#endif
-
-#ifndef arch_kmap_local_post_unmap
-# define arch_kmap_local_post_unmap(vaddr)	do { } while (0)
-#endif
-
-#ifndef arch_kmap_local_map_idx
-#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
-#endif
-
-#ifndef arch_kmap_local_unmap_idx
-#define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
-#endif
-
-#ifndef arch_kmap_local_high_get
-static inline void *arch_kmap_local_high_get(struct page *page)
-{
-	return NULL;
-}
-#endif
-
-/* Unmap a local mapping which was obtained by kmap_high_get() */
-static inline bool kmap_high_unmap_local(unsigned long vaddr)
-{
-#ifdef ARCH_NEEDS_KMAP_HIGH_GET
-	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
-		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
-		return true;
-	}
-#endif
-	return false;
-}
-
-static inline int kmap_local_calc_idx(int idx)
-{
-	return idx + KM_MAX_IDX * smp_processor_id();
-}
-
-static pte_t *__kmap_pte;
-
-static pte_t *kmap_get_pte(void)
-{
-	if (!__kmap_pte)
-		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
-	return __kmap_pte;
-}
-
-void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
-{
-	pte_t pteval, *kmap_pte = kmap_get_pte();
-	unsigned long vaddr;
-	int idx;
-
-	/*
-	 * Disable migration so resulting virtual address is stable
-	 * accross preemption.
-	 */
-	migrate_disable();
-	preempt_disable();
-	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	BUG_ON(!pte_none(*(kmap_pte - idx)));
-	pteval = pfn_pte(pfn, prot);
-	set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
-	arch_kmap_local_post_map(vaddr, pteval);
-	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
-	preempt_enable();
-
-	return (void *)vaddr;
-}
-EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);
-
-void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
-{
-	void *kmap;
-
-	if (!PageHighMem(page))
-		return page_address(page);
-
-	/* Try kmap_high_get() if architecture has it enabled */
-	kmap = arch_kmap_local_high_get(page);
-	if (kmap)
-		return kmap;
-
-	return __kmap_local_pfn_prot(page_to_pfn(page), prot);
-}
-EXPORT_SYMBOL(__kmap_local_page_prot);
-
-void kunmap_local_indexed(void *vaddr)
-{
-	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
-	pte_t *kmap_pte = kmap_get_pte();
-	int idx;
-
-	if (addr < __fix_to_virt(FIX_KMAP_END) ||
-	    addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
-		/*
-		 * Handle mappings which were obtained by kmap_high_get()
-		 * first as the virtual address of such mappings is below
-		 * PAGE_OFFSET. Warn for all other addresses which are in
-		 * the user space part of the virtual address space.
-		 */
-		if (!kmap_high_unmap_local(addr))
-			WARN_ON_ONCE(addr < PAGE_OFFSET);
-		return;
-	}
-
-	preempt_disable();
-	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
-	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
-	arch_kmap_local_pre_unmap(addr);
-	pte_clear(&init_mm, addr, kmap_pte - idx);
-	arch_kmap_local_post_unmap(addr);
-	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
-	kmap_local_idx_pop();
-	preempt_enable();
-	migrate_enable();
-}
-EXPORT_SYMBOL(kunmap_local_indexed);
-
-/*
- * Invoked before switch_to(). This is safe even when during or after
- * clearing the maps an interrupt which needs a kmap_local happens because
- * the task::kmap_ctrl.idx is not modified by the unmapping code so a
- * nested kmap_local will use the next unused index and restore the index
- * on unmap. The already cleared kmaps of the outgoing task are irrelevant
- * because the interrupt context does not know about them. The same applies
- * when scheduling back in for an interrupt which happens before the
- * restore is complete.
- */
-void __kmap_local_sched_out(void)
-{
-	struct task_struct *tsk = current;
-	pte_t *kmap_pte = kmap_get_pte();
-	int i;
-
-	/* Clear kmaps */
-	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
-		pte_t pteval = tsk->kmap_ctrl.pteval[i];
-		unsigned long addr;
-		int idx;
-
-		/* With debug all even slots are unmapped and act as guard */
-		if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
-			WARN_ON_ONCE(!pte_none(pteval));
-			continue;
-		}
-		if (WARN_ON_ONCE(pte_none(pteval)))
-			continue;
-
-		/*
-		 * This is a horrible hack for XTENSA to calculate the
-		 * coloured PTE index. Uses the PFN encoded into the pteval
-		 * and the map index calculation because the actual mapped
-		 * virtual address is not stored in task::kmap_ctrl.
-		 * For any sane architecture this is optimized out.
-		 */
-		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
-
-		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-		arch_kmap_local_pre_unmap(addr);
-		pte_clear(&init_mm, addr, kmap_pte - idx);
-		arch_kmap_local_post_unmap(addr);
-	}
-}
-
-void __kmap_local_sched_in(void)
-{
-	struct task_struct *tsk = current;
-	pte_t *kmap_pte = kmap_get_pte();
-	int i;
-
-	/* Restore kmaps */
-	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
-		pte_t pteval = tsk->kmap_ctrl.pteval[i];
-		unsigned long addr;
-		int idx;
-
-		/* With debug all even slots are unmapped and act as guard */
-		if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
-			WARN_ON_ONCE(!pte_none(pteval));
-			continue;
-		}
-		if (WARN_ON_ONCE(pte_none(pteval)))
-			continue;
-
-		/* See comment in __kmap_local_sched_out() */
-		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
-		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-		set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
-		arch_kmap_local_post_map(addr, pteval);
-	}
-}
-
-void kmap_local_fork(struct task_struct *tsk)
-{
-	if (WARN_ON_ONCE(tsk->kmap_ctrl.idx))
-		memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl));
-}
-
-#endif
+#endif /* CONFIG_HIGHMEM */
 
 #if defined(HASHED_PAGE_VIRTUAL)
-- 
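Note: the per-CPU counter restored by this patch, __kmap_atomic_idx, is the stack
index for the classic per-CPU kmap_atomic slots that the removed kmap_local code
had replaced. For context, below is a minimal sketch of the helpers that consume
this counter, modeled on the pre-5.11 include/linux/highmem.h; it is not part of
this patch, and the exact guard checks and the KM_TYPE_NR bound are assumptions
based on that era's code:

	/* Sketch of the pre-5.11 consumers of __kmap_atomic_idx (illustrative). */
	#include <linux/percpu.h>
	#include <asm/kmap_types.h>	/* KM_TYPE_NR */

	DECLARE_PER_CPU(int, __kmap_atomic_idx);

	static inline int kmap_atomic_idx_push(void)
	{
		/* Reserve the next atomic-kmap fixmap slot on this CPU. */
		int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

	#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(in_irq() && !irqs_disabled());
		BUG_ON(idx >= KM_TYPE_NR);
	#endif
		return idx;
	}

	static inline int kmap_atomic_idx(void)
	{
		/* Current top of this CPU's atomic-kmap stack. */
		return __this_cpu_read(__kmap_atomic_idx) - 1;
	}

	static inline void kmap_atomic_idx_pop(void)
	{
	#ifdef CONFIG_DEBUG_HIGHMEM
		int idx = __this_cpu_dec_return(__kmap_atomic_idx);

		BUG_ON(idx < 0);
	#else
		__this_cpu_dec(__kmap_atomic_idx);
	#endif
	}

Because the counter is strictly per-CPU, callers of kmap_atomic() must keep
preemption (and pagefaults) disabled between push and pop, which is why the
mapping side runs under preempt_disable(); the RT kmap_local scheme removed
here existed precisely to lift that restriction.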