From 297b60346df8beafee954a0fd7c2d64f33f3b9bc Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 11 May 2024 01:44:05 +0000
Subject: [PATCH] rtl8211F_led_control

---
 kernel/arch/ia64/include/asm/pgtable.h | 67 +++++++++------------------
 1 files changed, 18 insertions(+), 49 deletions(-)

diff --git a/kernel/arch/ia64/include/asm/pgtable.h b/kernel/arch/ia64/include/asm/pgtable.h
index 1658277..6e5c387 100644
--- a/kernel/arch/ia64/include/asm/pgtable.h
+++ b/kernel/arch/ia64/include/asm/pgtable.h
@@ -279,16 +279,16 @@
 #define pud_bad(pud)		(!ia64_phys_addr_valid(pud_val(pud)))
 #define pud_present(pud)	(pud_val(pud) != 0UL)
 #define pud_clear(pudp)		(pud_val(*(pudp)) = 0UL)
-#define pud_page_vaddr(pud)	((unsigned long) __va(pud_val(pud) & _PFN_MASK))
+#define pud_pgtable(pud)	((pmd_t *) __va(pud_val(pud) & _PFN_MASK))
 #define pud_page(pud)		virt_to_page((pud_val(pud) + PAGE_OFFSET))
 
 #if CONFIG_PGTABLE_LEVELS == 4
-#define pgd_none(pgd)		(!pgd_val(pgd))
-#define pgd_bad(pgd)		(!ia64_phys_addr_valid(pgd_val(pgd)))
-#define pgd_present(pgd)	(pgd_val(pgd) != 0UL)
-#define pgd_clear(pgdp)		(pgd_val(*(pgdp)) = 0UL)
-#define pgd_page_vaddr(pgd)	((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
-#define pgd_page(pgd)		virt_to_page((pgd_val(pgd) + PAGE_OFFSET))
+#define p4d_none(p4d)		(!p4d_val(p4d))
+#define p4d_bad(p4d)		(!ia64_phys_addr_valid(p4d_val(p4d)))
+#define p4d_present(p4d)	(p4d_val(p4d) != 0UL)
+#define p4d_clear(p4dp)		(p4d_val(*(p4dp)) = 0UL)
+#define p4d_pgtable(p4d)	((pud_t *) __va(p4d_val(p4d) & _PFN_MASK))
+#define p4d_page(p4d)		virt_to_page((p4d_val(p4d) + PAGE_OFFSET))
 #endif
 
 /*
@@ -298,7 +298,6 @@
 #define pte_exec(pte)		((pte_val(pte) & _PAGE_AR_RX) != 0)
 #define pte_dirty(pte)		((pte_val(pte) & _PAGE_D) != 0)
 #define pte_young(pte)		((pte_val(pte) & _PAGE_A) != 0)
-#define pte_special(pte)	0
 
 /*
  * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
@@ -311,7 +310,6 @@
 #define pte_mkclean(pte)	(__pte(pte_val(pte) & ~_PAGE_D))
 #define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))
 #define pte_mkhuge(pte)		(__pte(pte_val(pte)))
-#define pte_mkspecial(pte)	(pte)
 
 /*
  * Because ia64's Icache and Dcache is not coherent (on a cpu), we need to
@@ -366,17 +364,14 @@
 	return (region << (PAGE_SHIFT - 6)) | l1index;
 }
 
+#define pgd_index pgd_index
 
-/* The offset in the 1-level directory is given by the 3 region bits
-   (61..63) and the level-1 bits.  */
-static inline pgd_t*
-pgd_offset (const struct mm_struct *mm, unsigned long address)
-{
-	return mm->pgd + pgd_index(address);
-}
-
-/* In the kernel's mapped region we completely ignore the region number
-   (since we know it's in region number 5). */
+/*
+ * In the kernel's mapped region we know everything is in region number 5, so
+ * as an optimisation its PGD already points to the area for that region.
+ * However, this also means that we cannot use pgd_index() and we must
+ * never add the region here.
+ */
 #define pgd_offset_k(addr) \
 	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
 
@@ -384,25 +379,6 @@
    resides in the kernel-mapped segment, hence we use pgd_offset_k()
    here.  */
 #define pgd_offset_gate(mm, addr)	pgd_offset_k(addr)
-
-#if CONFIG_PGTABLE_LEVELS == 4
-/* Find an entry in the second-level page table.. */
-#define pud_offset(dir,addr) \
-	((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
-#endif
-
-/* Find an entry in the third-level page table.. */
-#define pmd_offset(dir,addr) \
-	((pmd_t *) pud_page_vaddr(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
-
-/*
- * Find an entry in the third-level page table.  This looks more complicated than it
- * should be because some platforms place page tables in high memory.
- */
-#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir,addr)	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
-#define pte_unmap(pte)		do { } while (0)
 
 /* atomic versions of the some PTE manipulations: */
@@ -544,9 +520,9 @@
 # ifdef CONFIG_VIRTUAL_MEM_MAP
   /* arch mem_map init routine is needed due to holes in a virtual mem_map */
-# define __HAVE_ARCH_MEMMAP_INIT
-  extern void memmap_init (unsigned long size, int nid, unsigned long zone,
-			   unsigned long start_pfn);
+void memmap_init(void);
+void arch_memmap_init(unsigned long size, int nid, unsigned long zone,
+		      unsigned long start_pfn);
 # endif /* CONFIG_VIRTUAL_MEM_MAP */
 # endif /* !__ASSEMBLY__ */
@@ -567,11 +543,6 @@
 #define KERNEL_TR_PAGE_SHIFT	_PAGE_SIZE_64M
 #define KERNEL_TR_PAGE_SIZE	(1 << KERNEL_TR_PAGE_SHIFT)
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-
 /* These tell get_user_pages() that the first gate page is accessible from user-level. */
 #define FIXADDR_USER_START	GATE_ADDR
 #ifdef HAVE_BUGGY_SEGREL
@@ -588,10 +559,8 @@
 
 #if CONFIG_PGTABLE_LEVELS == 3
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #endif
-#include <asm-generic/5level-fixup.h>
-#include <asm-generic/pgtable.h>
+#include <asm-generic/pgtable-nop4d.h>
 
 #endif /* _ASM_IA64_PGTABLE_H */
--
Gitblit v1.6.2
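
Note (illustration, not part of the patch): the pud_page_vaddr() -> pud_pgtable()
and pgd_page_vaddr() -> p4d_pgtable() renames in the first hunk are not cosmetic.
The new accessors return the next-level table with its real C type instead of an
unsigned long that every caller had to cast, which is exactly the shape the
generic walkers in <linux/pgtable.h> expect. A minimal sketch of that pattern,
assuming the generic pmd_index() helper (pmd_offset_sketch is a hypothetical
name used here only for illustration):

#include <linux/pgtable.h>

/* Sketch of a generic-style pmd lookup built on the typed accessor:
 * pud_pgtable() already yields a pmd_t * table, so no cast is needed. */
static inline pmd_t *pmd_offset_sketch(pud_t *pud, unsigned long address)
{
	return pud_pgtable(*pud) + pmd_index(address);
}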
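
Note (illustration, not part of the patch): ia64 builds a user PGD index from
both the region bits, addr[63:61], and the level-1 index bits, which is what the
custom pgd_index() kept above encodes. Kernel mapped addresses all live in
region 5, and init_mm.pgd already points at region 5's slice of the directory,
so pgd_offset_k() deliberately masks with (PTRS_PER_PGD - 1) instead of calling
pgd_index(). A hypothetical helper spelling that out (kernel_pgd_sketch is an
invented name, not kernel API):

#include <linux/mm_types.h>
#include <linux/pgtable.h>

/* Equivalent to pgd_offset_k(addr) after this patch: only the low index
 * bits select a slot; the region is implied by init_mm.pgd itself. */
static inline pgd_t *kernel_pgd_sketch(unsigned long addr)
{
	return init_mm.pgd + ((addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1));
}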
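
Note (illustration, not part of the patch): after this conversion the arch no
longer defines pud_offset()/pmd_offset()/pte_offset_kernel() itself; a software
page-table walk goes through the generic helpers in <linux/pgtable.h>, with the
p4d level supplied by the new p4d_* macros on 4-level configs and folded away by
<asm-generic/pgtable-nop4d.h> on 3-level ones. A sketch of such a walk under
those assumptions (walk_sketch is a hypothetical function; locking and huge-page
cases omitted):

#include <linux/mm_types.h>
#include <linux/pgtable.h>

static pte_t *walk_sketch(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);   /* generic, via the pgd_index() kept above */
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);         /* no-op fold on 3-level configs */
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);         /* generic helper built on p4d_pgtable() */
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);         /* generic helper built on pud_pgtable() */
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr); /* generic pte_index()-based lookup */
}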