forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10, commit 10ebd8556b7990499c896a550e3d416b444211e6

--- a/kernel/arch/sparc/include/asm/pgtable_64.h
+++ b/kernel/arch/sparc/include/asm/pgtable_64.h
@@ -13,7 +13,7 @@
  * the SpitFire page tables.
  */
 
-#include <asm-generic/5level-fixup.h>
+#include <asm-generic/pgtable-nop4d.h>
 #include <linux/compiler.h>
 #include <linux/const.h>
 #include <asm/types.h>
@@ -230,36 +230,6 @@
 
 extern struct page *mem_map_zero;
 #define ZERO_PAGE(vaddr) (mem_map_zero)
-
-/* This macro must be updated when the size of struct page grows above 80
- * or reduces below 64.
- * The idea that compiler optimizes out switch() statement, and only
- * leaves clrx instructions
- */
-#define mm_zero_struct_page(pp) do { \
-        unsigned long *_pp = (void *)(pp); \
- \
-        /* Check that struct page is either 64, 72, or 80 bytes */ \
-        BUILD_BUG_ON(sizeof(struct page) & 7); \
-        BUILD_BUG_ON(sizeof(struct page) < 64); \
-        BUILD_BUG_ON(sizeof(struct page) > 80); \
- \
-        switch (sizeof(struct page)) { \
-        case 80: \
-                _pp[9] = 0; /* fallthrough */ \
-        case 72: \
-                _pp[8] = 0; /* fallthrough */ \
-        default: \
-                _pp[7] = 0; \
-                _pp[6] = 0; \
-                _pp[5] = 0; \
-                _pp[4] = 0; \
-                _pp[3] = 0; \
-                _pp[2] = 0; \
-                _pp[1] = 0; \
-                _pp[0] = 0; \
-        } \
-} while (0)
 
 /* PFNs are real physical page numbers.  However, mem_map only begins to record
  * per-page information starting at pfn_base.  This is to handle systems where
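The removed mm_zero_struct_page() depends on sizeof(struct page) being a compile-time constant, so the switch collapses into straight-line stores (clrx on sparc64) for the one matching size. Below is a minimal, self-contained sketch of that technique; struct demo and zero_demo() are made-up names for illustration, not kernel code.

    #include <stdint.h>

    struct demo { uint64_t w[10]; };        /* stand-in for an 80-byte struct page */

    static inline void zero_demo(struct demo *d)
    {
            uint64_t *p = (uint64_t *)d;

            switch (sizeof(*d)) {           /* constant, so dead cases are dropped */
            case 80:
                    p[9] = 0;               /* fallthrough */
            case 72:
                    p[8] = 0;               /* fallthrough */
            default:                        /* the common 64-byte tail */
                    p[7] = 0; p[6] = 0; p[5] = 0; p[4] = 0;
                    p[3] = 0; p[2] = 0; p[1] = 0; p[0] = 0;
            }
    }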
@@ -713,6 +683,7 @@
         return pte_val(pte) & _PAGE_SPECIAL;
 }
 
+#define pmd_leaf pmd_large
 static inline unsigned long pmd_large(pmd_t pmd)
 {
         pte_t pte = __pte(pmd_val(pmd));
@@ -840,9 +811,9 @@
 
 #define pud_bad(pud) (pud_val(pud) & ~PAGE_MASK)
 
-#define pgd_none(pgd) (!pgd_val(pgd))
+#define p4d_none(p4d) (!p4d_val(p4d))
 
-#define pgd_bad(pgd) (pgd_val(pgd) & ~PAGE_MASK)
+#define p4d_bad(p4d) (p4d_val(p4d) & ~PAGE_MASK)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
@@ -864,7 +835,7 @@
 
 #define pud_set(pudp, pmdp) \
         (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
-static inline unsigned long __pmd_page(pmd_t pmd)
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 {
         pte_t pte = __pte(pmd_val(pmd));
         unsigned long pfn;
@@ -874,26 +845,30 @@
         return ((unsigned long) __va(pfn << PAGE_SHIFT));
 }
 
-static inline unsigned long pud_page_vaddr(pud_t pud)
+static inline pmd_t *pud_pgtable(pud_t pud)
 {
         pte_t pte = __pte(pud_val(pud));
         unsigned long pfn;
 
         pfn = pte_pfn(pte);
 
-        return ((unsigned long) __va(pfn << PAGE_SHIFT));
+        return ((pmd_t *) __va(pfn << PAGE_SHIFT));
 }
 
-#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
-#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
+#define pmd_page(pmd) virt_to_page((void *)pmd_page_vaddr(pmd))
+#define pud_page(pud) virt_to_page((void *)pud_pgtable(pud))
 #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
 #define pud_present(pud) (pud_val(pud) != 0U)
 #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
-#define pgd_page_vaddr(pgd) \
-        ((unsigned long) __va(pgd_val(pgd)))
-#define pgd_present(pgd) (pgd_val(pgd) != 0U)
-#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)
+#define p4d_pgtable(p4d) \
+        ((pud_t *) __va(p4d_val(p4d)))
+#define p4d_present(p4d) (p4d_val(p4d) != 0U)
+#define p4d_clear(p4dp) (p4d_val(*(p4dp)) = 0UL)
 
+/* only used by the stubbed out hugetlb gup code, should never be called */
+#define p4d_page(p4d) NULL
+
+#define pud_leaf pud_large
 static inline unsigned long pud_large(pud_t pud)
 {
         pte_t pte = __pte(pud_val(pud));
@@ -911,33 +886,8 @@
 /* Same in both SUN4V and SUN4U.  */
 #define pte_none(pte) (!pte_val(pte))
 
-#define pgd_set(pgdp, pudp) \
-        (pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp))))
-
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* Find an entry in the third-level page table.. */
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-#define pud_offset(pgdp, address) \
-        ((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address))
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pudp, address) \
-        ((pmd_t *) pud_page_vaddr(*(pudp)) + \
-         (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(dir, address) \
-        ((pte_t *) __pmd_page(*(dir)) + \
-        ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-#define pte_offset_kernel pte_index
-#define pte_offset_map pte_index
-#define pte_unmap(pte) do { } while (0)
+#define p4d_set(p4dp, pudp) \
+        (p4d_val(*(p4dp)) = (__pa((unsigned long) (pudp))))
 
 /* We cannot include <linux/mm_types.h> at this point yet: */
 extern struct mm_struct init_mm;
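With the arch-local pgd_offset()/pud_offset()/pmd_offset()/pte_index() macros dropped above, walks go through the generic helpers from <linux/pgtable.h>, and the p4d level folded in by <asm-generic/pgtable-nop4d.h> is a pass-through between pgd and pud. A hedged sketch of what a kernel-address walk looks like after this change; walk_kernel_addr() is an illustrative helper, not something added by the patch:

    /* Illustrative only: walk a kernel virtual address down to its PTE
     * using the generic offset helpers plus the p4d/pud/pmd predicates
     * defined in this header.
     */
    static pte_t *walk_kernel_addr(unsigned long addr)
    {
            pgd_t *pgd = pgd_offset_k(addr);
            p4d_t *p4d = p4d_offset(pgd, addr);     /* folded (no-op) level on sparc64 */
            pud_t *pud;
            pmd_t *pmd;

            if (p4d_none(*p4d))
                    return NULL;
            pud = pud_offset(p4d, addr);
            if (pud_none(*pud) || pud_leaf(*pud))   /* stop at holes or huge mappings */
                    return NULL;
            pmd = pmd_offset(pud, addr);
            if (pmd_none(*pmd) || pmd_leaf(*pmd))
                    return NULL;
            return pte_offset_kernel(pmd, addr);
    }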
@@ -1103,10 +1053,50 @@
 
         return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
 }
-#define io_remap_pfn_range io_remap_pfn_range
+#define io_remap_pfn_range io_remap_pfn_range
+
+static inline unsigned long __untagged_addr(unsigned long start)
+{
+        if (adi_capable()) {
+                long addr = start;
+
+                /* If userspace has passed a versioned address, kernel
+                 * will not find it in the VMAs since it does not store
+                 * the version tags in the list of VMAs.  Storing version
+                 * tags in list of VMAs is impractical since they can be
+                 * changed any time from userspace without dropping into
+                 * kernel.  Any address search in VMAs will be done with
+                 * non-versioned addresses.  Ensure the ADI version bits
+                 * are dropped here by sign extending the last bit before
+                 * ADI bits.  IOMMU does not implement version tags.
+                 */
+                return (addr << (long)adi_nbits()) >> (long)adi_nbits();
+        }
+
+        return start;
+}
+#define untagged_addr(addr) \
+        ((__typeof__(addr))(__untagged_addr((unsigned long)(addr))))
+
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+        u64 prot;
+
+        if (tlb_type == hypervisor) {
+                prot = _PAGE_PRESENT_4V | _PAGE_P_4V;
+                if (write)
+                        prot |= _PAGE_WRITE_4V;
+        } else {
+                prot = _PAGE_PRESENT_4U | _PAGE_P_4U;
+                if (write)
+                        prot |= _PAGE_WRITE_4U;
+        }
+
+        return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
+}
+#define pte_access_permitted pte_access_permitted
 
 #include <asm/tlbflush.h>
-#include <asm-generic/pgtable.h>
 
 /* We provide our own get_unmapped_area to cope with VA holes and
  * SHM area cache aliasing for userland.
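The shift pair in __untagged_addr() above strips the ADI version tag by sign-extending the highest non-tag bit. A standalone illustration of that arithmetic follows; the 4-bit tag width and the sample address are assumptions for the demo, not values taken from the patch (the real width comes from adi_nbits()):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const int tag_bits = 4;                         /* hypothetical adi_nbits() */
            int64_t tagged = 0x7000123456789000LL;          /* made-up address, tag 0x7 in the top nibble */

            /* Left shift on an unsigned copy (to keep the demo free of signed
             * overflow), then arithmetic right shift on the signed value to
             * sign-extend the last non-tag bit, mirroring the kernel's
             * (addr << nbits) >> nbits on a long.
             */
            int64_t untagged = (int64_t)((uint64_t)tagged << tag_bits) >> tag_bits;

            printf("%016llx -> %016llx\n",
                   (unsigned long long)tagged, (unsigned long long)untagged);
            return 0;
    }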
@@ -1122,7 +1112,6 @@
                           unsigned long);
 #define HAVE_ARCH_FB_UNMAPPED_AREA
 
-void pgtable_cache_init(void);
 void sun4v_register_fault_status(void);
 void sun4v_ktsb_register(void);
 void __init cheetah_ecache_flush_init(void);