2024-05-10 cde9070d9970eef1f7ec2360586c802a16230ad8
kernel/arch/s390/include/asm/pgtable.h
@@ -19,6 +19,7 @@
 #include <linux/atomic.h>
 #include <asm/bug.h>
 #include <asm/page.h>
+#include <asm/uv.h>
 
 extern pgd_t swapper_pg_dir[];
 extern void paging_init(void);
@@ -86,7 +87,9 @@
  */
 extern unsigned long VMALLOC_START;
 extern unsigned long VMALLOC_END;
+#define VMALLOC_DEFAULT_SIZE	((128UL << 30) - MODULES_LEN)
 extern struct page *vmemmap;
+extern unsigned long vmemmap_size;
 
 #define VMEM_MAX_PHYS	((unsigned long) vmemmap)
 
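
For scale, a standalone check (a side note, not part of the patch) of the constant in VMALLOC_DEFAULT_SIZE: 128UL << 30 is 128 GiB, so the default vmalloc area is 128 GiB minus the space set aside for modules.

#include <assert.h>

int main(void)
{
	/* 128UL << 30 == 2^37 bytes == 128 GiB */
	assert((128UL << 30) == 137438953472UL);
	return 0;
}
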
@@ -238,7 +241,7 @@
 #define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit */
 #define _REGION_ENTRY_OFFSET	0xc0	/* region table offset */
 #define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry */
-#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask */
+#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask */
 #define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type */
 #define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type */
 #define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type */
@@ -265,11 +268,9 @@
 #endif
 
 #define _REGION_ENTRY_BITS	0xfffffffffffff22fUL
-#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL
 
 /* Bits in the segment table entry */
 #define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
-#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
 #define _SEGMENT_ENTRY_HARDWARE_BITS	0xfffffffffffffe30UL
 #define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff00730UL
 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
@@ -277,6 +278,7 @@
 #define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit */
 #define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit */
 #define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry */
+#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask */
 
 #define _SEGMENT_ENTRY		(0)
 #define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)
@@ -340,6 +342,8 @@
 #define PTRS_PER_PUD	_CRST_ENTRIES
 #define PTRS_PER_P4D	_CRST_ENTRIES
 #define PTRS_PER_PGD	_CRST_ENTRIES
+
+#define MAX_PTRS_PER_P4D	PTRS_PER_P4D
 
 /*
  * Segment table and region3 table entry encoding
@@ -466,6 +470,12 @@
 				 _SEGMENT_ENTRY_YOUNG |	\
 				 _SEGMENT_ENTRY_PROTECT | \
 				 _SEGMENT_ENTRY_NOEXEC)
+#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
+				 _SEGMENT_ENTRY_LARGE |	\
+				 _SEGMENT_ENTRY_READ |	\
+				 _SEGMENT_ENTRY_WRITE |	\
+				 _SEGMENT_ENTRY_YOUNG |	\
+				 _SEGMENT_ENTRY_DIRTY)
 
 /*
  * Region3 entry (large page) protection definitions.
@@ -507,6 +517,15 @@
 {
 #ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
+		return 1;
+#endif
+	return 0;
+}
+
+static inline int mm_is_protected(struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+	if (unlikely(atomic_read(&mm->context.is_protected)))
 		return 1;
 #endif
 	return 0;
@@ -606,15 +625,17 @@
 
 static inline int pgd_bad(pgd_t pgd)
 {
-	/*
-	 * With dynamic page table levels the pgd can be a region table
-	 * entry or a segment table entry. Check for the bit that are
-	 * invalid for either table entry.
-	 */
-	unsigned long mask =
-		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
-		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
-	return (pgd_val(pgd) & mask) != 0;
+	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
+		return 0;
+	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
+}
+
+static inline unsigned long pgd_pfn(pgd_t pgd)
+{
+	unsigned long origin_mask;
+
+	origin_mask = _REGION_ENTRY_ORIGIN;
+	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
 }
 
 static inline int p4d_folded(p4d_t p4d)
@@ -663,6 +684,7 @@
 	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
 }
 
+#define pud_leaf	pud_large
 static inline int pud_large(pud_t pud)
 {
 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
@@ -670,16 +692,7 @@
 	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
 }
 
-static inline unsigned long pud_pfn(pud_t pud)
-{
-	unsigned long origin_mask;
-
-	origin_mask = _REGION_ENTRY_ORIGIN;
-	if (pud_large(pud))
-		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
-	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
-}
-
+#define pmd_leaf	pmd_large
 static inline int pmd_large(pmd_t pmd)
 {
 	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
@@ -687,24 +700,30 @@
 
 static inline int pmd_bad(pmd_t pmd)
 {
-	if (pmd_large(pmd))
-		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
+	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
+		return 1;
 	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
 }
 
 static inline int pud_bad(pud_t pud)
 {
-	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
-		return pmd_bad(__pmd(pud_val(pud)));
-	if (pud_large(pud))
-		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
+	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
+
+	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
+		return 1;
+	if (type < _REGION_ENTRY_TYPE_R3)
+		return 0;
 	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
 }
 
 static inline int p4d_bad(p4d_t p4d)
 {
-	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
-		return pud_bad(__pud(p4d_val(p4d)));
+	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;
+
+	if (type > _REGION_ENTRY_TYPE_R2)
+		return 1;
+	if (type < _REGION_ENTRY_TYPE_R2)
+		return 0;
 	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
 }
 
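
The rewritten pmd_bad()/pud_bad()/p4d_bad() above lean on the ordering of the table-type bits (R1=0x0c > R2=0x08 > R3=0x04 > segment=0x00): an entry whose type is higher than its slot's level is bad, while one whose type is lower is a folded entry from a shallower table and is accepted as-is. A standalone sketch of the pud case (macro values copied from this header; the helper name is hypothetical, and the pud_large() and reserved-bit checks of the real pud_bad() are left out):

#include <assert.h>

#define _REGION_ENTRY_TYPE_MASK	0x0c
#define _REGION_ENTRY_TYPE_R2	0x08
#define _REGION_ENTRY_TYPE_R3	0x04

static int pud_type_ok(unsigned long pud_val)
{
	unsigned long type = pud_val & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3)	/* R1/R2 entry in a pud slot */
		return 0;
	return 1;	/* genuine R3, or a folded lower-level entry */
}

int main(void)
{
	assert(!pud_type_ok(_REGION_ENTRY_TYPE_R2));	/* wrong level: bad */
	assert(pud_type_ok(_REGION_ENTRY_TYPE_R3));	/* region-third: ok */
	assert(pud_type_ok(0x00));			/* folded (segment-type) */
	return 0;
}
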
@@ -718,36 +737,26 @@
 	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
 }
 
-static inline unsigned long pmd_pfn(pmd_t pmd)
-{
-	unsigned long origin_mask;
-
-	origin_mask = _SEGMENT_ENTRY_ORIGIN;
-	if (pmd_large(pmd))
-		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
-	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
-}
-
 #define pmd_write pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
 	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
 }
 
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
+}
+
 static inline int pmd_dirty(pmd_t pmd)
 {
-	int dirty = 1;
-	if (pmd_large(pmd))
-		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
-	return dirty;
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
 }
 
 static inline int pmd_young(pmd_t pmd)
 {
-	int young = 1;
-	if (pmd_large(pmd))
-		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
-	return young;
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
 }
 
 static inline int pte_present(pte_t pte)
@@ -853,6 +862,25 @@
 static inline int pte_unused(pte_t pte)
 {
 	return pte_val(pte) & _PAGE_UNUSED;
+}
+
+/*
+ * Extract the pgprot value from the given pte while at the same time making it
+ * usable for kernel address space mappings where fault driven dirty and
+ * young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
+ * must not be set.
+ */
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
+
+	if (pte_write(pte))
+		pte_flags |= pgprot_val(PAGE_KERNEL);
+	else
+		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
+	pte_flags |= pte_val(pte) & mio_wb_bit_mask;
+
+	return __pgprot(pte_flags);
 }
 
 /*
@@ -975,9 +1003,9 @@
 #define IPTE_NODAT	0x400
 #define IPTE_GUEST_ASCE	0x800
 
-static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
-			       unsigned long opt, unsigned long asce,
-			       int local)
+static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
+					unsigned long opt, unsigned long asce,
+					int local)
 {
 	unsigned long pto = (unsigned long) ptep;
 
@@ -998,8 +1026,8 @@
 		: [r1] "a" (pto), [m4] "i" (local) : "memory");
 }
 
-static inline void __ptep_ipte_range(unsigned long address, int nr,
-				     pte_t *ptep, int local)
+static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
+					      pte_t *ptep, int local)
 {
 	unsigned long pto = (unsigned long) ptep;
 
@@ -1049,18 +1077,29 @@
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 				       unsigned long addr, pte_t *ptep)
 {
-	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
+	pte_t res;
+
+	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
+	if (mm_is_protected(mm) && pte_present(res))
+		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
+	return res;
 }
 
 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
-pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
-void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);
+pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
+void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
+			     pte_t *, pte_t, pte_t);
 
 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 				     unsigned long addr, pte_t *ptep)
 {
-	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
+	pte_t res;
+
+	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
+	if (mm_is_protected(vma->vm_mm) && pte_present(res))
+		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
+	return res;
 }
 
 /*
@@ -1075,12 +1114,17 @@
 						     unsigned long addr,
 						     pte_t *ptep, int full)
 {
+	pte_t res;
+
 	if (full) {
-		pte_t pte = *ptep;
+		res = *ptep;
 		*ptep = __pte(_PAGE_INVALID);
-		return pte;
+	} else {
+		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
 	}
-	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
+	if (mm_is_protected(mm) && pte_present(res))
+		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
+	return res;
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -1142,6 +1186,12 @@
 void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
 void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
 
+#define pgprot_writecombine	pgprot_writecombine
+pgprot_t pgprot_writecombine(pgprot_t prot);
+
+#define pgprot_writethrough	pgprot_writethrough
+pgprot_t pgprot_writethrough(pgprot_t prot);
+
 /*
  * Certain architectures need to do special things when PTEs
  * within a page table are directly modified. Thus, the following
@@ -1165,7 +1215,8 @@
 static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 {
 	pte_t __pte;
-	pte_val(__pte) = physpage + pgprot_val(pgprot);
+
+	pte_val(__pte) = physpage | pgprot_val(pgprot);
 	if (!MACHINE_HAS_NX)
 		pte_val(__pte) &= ~_PAGE_NOEXEC;
 	return pte_mkyoung(__pte);
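
The change from + to | in mk_pte_phys() is behavior-preserving: physpage is page aligned and the s390 pte protection bits all sit below PAGE_SHIFT, so the addition could never carry; OR simply states the intent. A minimal standalone check with hypothetical values:

#include <assert.h>

int main(void)
{
	unsigned long physpage = 0x12345000UL;	/* page aligned (PAGE_SHIFT == 12) */
	unsigned long prot = 0x403UL;		/* protection bits below PAGE_SHIFT */

	assert((physpage + prot) == (physpage | prot));
	return 0;
}
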
@@ -1185,42 +1236,116 @@
 #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
 #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
 
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
-#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
 #define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
 #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
 
-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline unsigned long pmd_deref(pmd_t pmd)
 {
-	p4d_t *p4d = (p4d_t *) pgd;
+	unsigned long origin_mask;
 
-	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
-		p4d = (p4d_t *) pgd_deref(*pgd);
-	return p4d + p4d_index(address);
+	origin_mask = _SEGMENT_ENTRY_ORIGIN;
+	if (pmd_large(pmd))
+		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
+	return pmd_val(pmd) & origin_mask;
 }
 
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+static inline unsigned long pmd_pfn(pmd_t pmd)
 {
-	pud_t *pud = (pud_t *) p4d;
-
-	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
-		pud = (pud_t *) p4d_deref(*p4d);
-	return pud + pud_index(address);
+	return pmd_deref(pmd) >> PAGE_SHIFT;
 }
 
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+static inline unsigned long pud_deref(pud_t pud)
 {
-	pmd_t *pmd = (pmd_t *) pud;
+	unsigned long origin_mask;
 
-	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
-		pmd = (pmd_t *) pud_deref(*pud);
-	return pmd + pmd_index(address);
+	origin_mask = _REGION_ENTRY_ORIGIN;
+	if (pud_large(pud))
+		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
+	return pud_val(pud) & origin_mask;
 }
+
+static inline unsigned long pud_pfn(pud_t pud)
+{
+	return pud_deref(pud) >> PAGE_SHIFT;
+}
+
+/*
+ * The pgd_offset function *always* adds the index for the top-level
+ * region/segment table. This is done to get a sequence like the
+ * following to work:
+ *	pgdp = pgd_offset(current->mm, addr);
+ *	pgd = READ_ONCE(*pgdp);
+ *	p4dp = p4d_offset(&pgd, addr);
+ *	...
+ * The subsequent p4d_offset, pud_offset and pmd_offset functions
+ * only add an index if they dereferenced the pointer.
+ */
+static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
+{
+	unsigned long rste;
+	unsigned int shift;
+
+	/* Get the first entry of the top level table */
+	rste = pgd_val(*pgd);
+	/* Pick up the shift from the table type of the first entry */
+	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
+	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
+}
+
+#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
+
+static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
+{
+	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
+		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
+	return (p4d_t *) pgdp;
+}
+#define p4d_offset_lockless p4d_offset_lockless
+
+static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
+{
+	return p4d_offset_lockless(pgdp, *pgdp, address);
+}
+
+static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
+{
+	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
+		return (pud_t *) p4d_deref(p4d) + pud_index(address);
+	return (pud_t *) p4dp;
+}
+#define pud_offset_lockless pud_offset_lockless
+
+static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
+{
+	return pud_offset_lockless(p4dp, *p4dp, address);
+}
+#define pud_offset pud_offset
+
+static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
+{
+	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
+		return (pmd_t *) pud_deref(pud) + pmd_index(address);
+	return (pmd_t *) pudp;
+}
+#define pmd_offset_lockless pmd_offset_lockless
+
+static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
+{
+	return pmd_offset_lockless(pudp, *pudp, address);
+}
+#define pmd_offset pmd_offset
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+	return (unsigned long) pmd_deref(pmd);
+}
+
+static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
+{
+	return end <= current->mm->context.asce_limit;
+}
+#define gup_fast_permitted gup_fast_permitted
 
 #define pfn_pte(pfn,pgprot)	mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
 #define pte_pfn(x)	(pte_val(x) >> PAGE_SHIFT)
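
As a worked check of the shift formula in the new pgd_offset_raw(): the type bits 0x0c encode the table level (R1=0x0c, R2=0x08, R3=0x04, segment=0x00), and each level indexes 11 address bits, with a segment table starting at bit 20. A standalone sketch (the helper name is hypothetical):

#include <assert.h>

/* mirrors: shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20 */
static unsigned int index_shift(unsigned long rste)
{
	return ((rste & 0x0cUL) >> 2) * 11 + 20;
}

int main(void)
{
	assert(index_shift(0x0c) == 53);	/* region-first table */
	assert(index_shift(0x08) == 42);	/* region-second table */
	assert(index_shift(0x04) == 31);	/* region-third table */
	assert(index_shift(0x00) == 20);	/* segment table */
	return 0;
}
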
@@ -1228,13 +1353,8 @@
 
 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
 #define pud_page(pud) pfn_to_page(pud_pfn(pud))
-#define p4d_page(pud) pfn_to_page(p4d_pfn(p4d))
-
-/* Find an entry in the lowest level page table.. */
-#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
-#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
-#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_unmap(pte) do { } while (0)
+#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
+#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
 
 static inline pmd_t pmd_wrprotect(pmd_t pmd)
 {
@@ -1246,29 +1366,23 @@
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
 {
 	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
-	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
-		return pmd;
-	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
+	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
 	return pmd;
 }
 
 static inline pmd_t pmd_mkclean(pmd_t pmd)
 {
-	if (pmd_large(pmd)) {
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
-		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
-	}
+	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
+	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
 	return pmd;
 }
 
 static inline pmd_t pmd_mkdirty(pmd_t pmd)
 {
-	if (pmd_large(pmd)) {
-		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
-			_SEGMENT_ENTRY_SOFT_DIRTY;
-		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
-			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
-	}
+	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
+	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
 	return pmd;
 }
 
@@ -1282,29 +1396,23 @@
 static inline pud_t pud_mkwrite(pud_t pud)
 {
 	pud_val(pud) |= _REGION3_ENTRY_WRITE;
-	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
-		return pud;
-	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
+	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
+		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
 	return pud;
 }
 
 static inline pud_t pud_mkclean(pud_t pud)
 {
-	if (pud_large(pud)) {
-		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
-		pud_val(pud) |= _REGION_ENTRY_PROTECT;
-	}
+	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
+	pud_val(pud) |= _REGION_ENTRY_PROTECT;
 	return pud;
 }
 
 static inline pud_t pud_mkdirty(pud_t pud)
 {
-	if (pud_large(pud)) {
-		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
-			_REGION3_ENTRY_SOFT_DIRTY;
-		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
-			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
-	}
+	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
+	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
+		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
 	return pud;
 }
 
@@ -1328,38 +1436,29 @@
 
 static inline pmd_t pmd_mkyoung(pmd_t pmd)
 {
-	if (pmd_large(pmd)) {
-		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
-		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
-			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
-	}
+	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
 	return pmd;
 }
 
 static inline pmd_t pmd_mkold(pmd_t pmd)
 {
-	if (pmd_large(pmd)) {
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
-		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
-	}
+	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
+	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
 	return pmd;
 }
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
-	if (pmd_large(pmd)) {
-		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
-			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
-			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
-		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
-		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
-			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
-		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
-			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
-		return pmd;
-	}
-	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
+	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
+		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
+		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
 	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
+	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
+		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
+		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
 	return pmd;
 }
 
@@ -1385,9 +1484,9 @@
 #define IDTE_NODAT	0x1000
 #define IDTE_GUEST_ASCE	0x2000
 
-static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
-			       unsigned long opt, unsigned long asce,
-			       int local)
+static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
+					unsigned long opt, unsigned long asce,
+					int local)
 {
 	unsigned long sto;
 
@@ -1411,9 +1510,9 @@
 	}
 }
 
-static inline void __pudp_idte(unsigned long addr, pud_t *pudp,
-			       unsigned long opt, unsigned long asce,
-			       int local)
+static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
+					unsigned long opt, unsigned long asce,
+					int local)
 {
 	unsigned long r3o;
 
@@ -1509,7 +1608,7 @@
 }
 
 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
-static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
+static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
 						 unsigned long addr,
 						 pmd_t *pmdp, int full)
 {
@@ -1518,7 +1617,7 @@
 		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
 		return pmd;
 	}
-	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
 }
 
 #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
@@ -1623,7 +1722,7 @@
 #define kern_addr_valid(addr) (1)
 
 extern int vmem_add_mapping(unsigned long start, unsigned long size);
-extern int vmem_remove_mapping(unsigned long start, unsigned long size);
+extern void vmem_remove_mapping(unsigned long start, unsigned long size);
 extern int s390_enable_sie(void);
 extern int s390_enable_skey(void);
 extern void s390_reset_cmma(struct mm_struct *mm);
@@ -1631,13 +1730,5 @@
 /* s390 has a private copy of get unmapped area to deal with cache synonyms */
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-
-/*
- * No page table caches to initialise
- */
-static inline void pgtable_cache_init(void) { }
-static inline void check_pgt_cache(void) { }
-
-#include <asm-generic/pgtable.h>
 
 #endif /* _S390_PAGE_H */
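
Putting the new helpers together, the lockless walk described in the pgd_offset comment above would look like this (a sketch of a hypothetical caller in a kernel with this patch applied, not code from the patch):

static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp, pgd;
	p4d_t *p4dp, p4d;
	pud_t *pudp, pud;

	pgdp = pgd_offset(mm, addr);	/* always indexes the top-level table */
	pgd = READ_ONCE(*pgdp);
	p4dp = p4d_offset_lockless(pgdp, pgd, addr);	/* indexes only if it dereferenced */
	p4d = READ_ONCE(*p4dp);
	pudp = pud_offset_lockless(p4dp, p4d, addr);
	pud = READ_ONCE(*pudp);
	return pmd_offset_lockless(pudp, pud, addr);
}

A real caller would also test pgd_bad()/p4d_bad()/pud_bad() (or the *_none() helpers) on each fetched entry before descending.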