2024-05-11 297b60346df8beafee954a0fd7c2d64f33f3b9bc
kernel/arch/powerpc/mm/slice.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * address space "slices" (meta-segments) support
  *
@@ -6,20 +7,6 @@
  * Based on hugetlb implementation
  *
  * Copyright (C) 2003 David Gibson, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #undef DEBUG
@@ -31,6 +18,7 @@
 #include <linux/spinlock.h>
 #include <linux/export.h>
 #include <linux/hugetlb.h>
+#include <linux/sched/mm.h>
 #include <linux/security.h>
 #include <asm/mman.h>
 #include <asm/mmu.h>
@@ -62,7 +50,7 @@
 
 #endif
 
-static inline bool slice_addr_is_low(unsigned long addr)
+static inline notrace bool slice_addr_is_low(unsigned long addr)
 {
 	u64 tmp = (u64)addr;
 
@@ -100,7 +88,7 @@
 {
 	struct vm_area_struct *vma;
 
-	if ((mm->context.slb_addr_limit - len) < addr)
+	if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
 		return 0;
 	vma = find_vma(mm, addr);
 	return (!vma || (addr + len) <= vm_start_gap(vma));
@@ -117,13 +105,11 @@
 	unsigned long start = slice << SLICE_HIGH_SHIFT;
 	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
 
-#ifdef CONFIG_PPC64
 	/* Hack, so that each addresses is controlled by exactly one
 	 * of the high or low area bitmaps, the first high area starts
 	 * at 4GB, not 0 */
 	if (start == 0)
-		start = SLICE_LOW_TOP;
-#endif
+		start = (unsigned long)SLICE_LOW_TOP;
 
 	return !slice_area_is_free(mm, start, end - start);
 }
@@ -148,40 +134,6 @@
 		if (!slice_high_has_vma(mm, i))
 			__set_bit(i, ret->high_slices);
 }
-
-#ifdef CONFIG_PPC_BOOK3S_64
-static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
-{
-#ifdef CONFIG_PPC_64K_PAGES
-	if (psize == MMU_PAGE_64K)
-		return &mm->context.mask_64k;
-#endif
-	if (psize == MMU_PAGE_4K)
-		return &mm->context.mask_4k;
-#ifdef CONFIG_HUGETLB_PAGE
-	if (psize == MMU_PAGE_16M)
-		return &mm->context.mask_16m;
-	if (psize == MMU_PAGE_16G)
-		return &mm->context.mask_16g;
-#endif
-	BUG();
-}
-#elif defined(CONFIG_PPC_8xx)
-static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
-{
-	if (psize == mmu_virtual_psize)
-		return &mm->context.mask_base_psize;
-#ifdef CONFIG_HUGETLB_PAGE
-	if (psize == MMU_PAGE_512K)
-		return &mm->context.mask_512k;
-	if (psize == MMU_PAGE_8M)
-		return &mm->context.mask_8m;
-#endif
-	BUG();
-}
-#else
-#error "Must define the slice masks for page sizes supported by the platform"
-#endif
 
 static bool slice_check_range_fits(struct mm_struct *mm,
			   const struct slice_mask *available,
@@ -227,7 +179,7 @@
 	copy_mm_to_paca(current->active_mm);
 
 	local_irq_save(flags);
-	slb_flush_and_rebolt();
+	slb_flush_and_restore_bolted();
 	local_irq_restore(flags);
 #endif
 }
@@ -245,14 +197,14 @@
 	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
 	slice_print_mask(" mask", mask);
 
-	psize_mask = slice_mask_for_size(mm, psize);
+	psize_mask = slice_mask_for_size(&mm->context, psize);
 
 	/* We need to use a spinlock here to protect against
 	 * concurrent 64k -> 4k demotion ...
 	 */
 	spin_lock_irqsave(&slice_convert_lock, flags);
 
-	lpsizes = mm->context.low_slices_psize;
+	lpsizes = mm_ctx_low_slices(&mm->context);
 	for (i = 0; i < SLICE_NUM_LOW; i++) {
 		if (!(mask->low_slices & (1u << i)))
 			continue;
@@ -262,7 +214,7 @@
 
 		/* Update the slice_mask */
 		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
-		old_mask = slice_mask_for_size(mm, old_psize);
+		old_mask = slice_mask_for_size(&mm->context, old_psize);
 		old_mask->low_slices &= ~(1u << i);
 		psize_mask->low_slices |= 1u << i;
 
@@ -271,8 +223,8 @@
 			(((unsigned long)psize) << (mask_index * 4));
 	}
 
-	hpsizes = mm->context.high_slices_psize;
-	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
+	hpsizes = mm_ctx_high_slices(&mm->context);
+	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
 		if (!test_bit(i, mask->high_slices))
 			continue;
 
@@ -281,7 +233,7 @@
 
 		/* Update the slice_mask */
 		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
-		old_mask = slice_mask_for_size(mm, old_psize);
+		old_mask = slice_mask_for_size(&mm->context, old_psize);
 		__clear_bit(i, old_mask->high_slices);
 		__set_bit(i, psize_mask->high_slices);
 
@@ -291,8 +243,8 @@
 	}
 
 	slice_dbg(" lsps=%lx, hsps=%lx\n",
-		  (unsigned long)mm->context.low_slices_psize,
-		  (unsigned long)mm->context.high_slices_psize);
+		  (unsigned long)mm_ctx_low_slices(&mm->context),
+		  (unsigned long)mm_ctx_high_slices(&mm->context));
 
 	spin_unlock_irqrestore(&slice_convert_lock, flags);
 
@@ -392,7 +344,7 @@
 	 * DEFAULT_MAP_WINDOW we should apply this.
 	 */
 	if (high_limit > DEFAULT_MAP_WINDOW)
-		addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;
+		addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;
 
 	while (addr > min_addr) {
 		info.high_limit = addr;
@@ -504,20 +456,20 @@
 		return -ENOMEM;
 	}
 
-	if (high_limit > mm->context.slb_addr_limit) {
+	if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
 		/*
 		 * Increasing the slb_addr_limit does not require
 		 * slice mask cache to be recalculated because it should
 		 * be already initialised beyond the old address limit.
 		 */
-		mm->context.slb_addr_limit = high_limit;
+		mm_ctx_set_slb_addr_limit(&mm->context, high_limit);
 
 		on_each_cpu(slice_flush_segments, mm, 1);
 	}
 
 	/* Sanity checks */
 	BUG_ON(mm->task_size == 0);
-	BUG_ON(mm->context.slb_addr_limit == 0);
+	BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
 	VM_BUG_ON(radix_enabled());
 
 	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
@@ -526,7 +478,7 @@
 
 	/* If hint, make sure it matches our alignment restrictions */
 	if (!fixed && addr) {
-		addr = _ALIGN_UP(addr, page_size);
+		addr = ALIGN(addr, page_size);
 		slice_dbg(" aligned addr=%lx\n", addr);
 		/* Ignore hint if it's too large or overlaps a VMA */
 		if (addr > high_limit - len || addr < mmap_min_addr ||
@@ -537,7 +489,7 @@
 	/* First make up a "good" mask of slices that have the right size
 	 * already
 	 */
-	maskp = slice_mask_for_size(mm, psize);
+	maskp = slice_mask_for_size(&mm->context, psize);
 
 	/*
 	 * Here "good" means slices that are already the right page size,
@@ -564,7 +516,7 @@
 	 * a pointer to good mask for the next code to use.
 	 */
 	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
-		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
+		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
 		if (fixed)
 			slice_or_mask(&good_mask, maskp, compat_maskp);
 		else
@@ -641,14 +593,13 @@
 	newaddr = slice_find_area(mm, len, &potential_mask,
 				  psize, topdown, high_limit);
 
-#ifdef CONFIG_PPC_64K_PAGES
-	if (newaddr == -ENOMEM && psize == MMU_PAGE_64K) {
+	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
+	    psize == MMU_PAGE_64K) {
 		/* retry the search with 4k-page slices included */
 		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
 		newaddr = slice_find_area(mm, len, &potential_mask,
 					  psize, topdown, high_limit);
 	}
-#endif
 
 	if (newaddr == -ENOMEM)
 		return -ENOMEM;
@@ -695,7 +646,7 @@
 					  unsigned long flags)
 {
 	return slice_get_unmapped_area(addr, len, flags,
-				       current->mm->context.user_psize, 0);
+				       mm_ctx_user_psize(&current->mm->context), 0);
 }
 
 unsigned long arch_get_unmapped_area_topdown(struct file *filp,
@@ -705,10 +656,10 @@
 					     const unsigned long flags)
 {
 	return slice_get_unmapped_area(addr0, len, flags,
-				       current->mm->context.user_psize, 1);
+				       mm_ctx_user_psize(&current->mm->context), 1);
 }
 
-unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
+unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
 {
 	unsigned char *psizes;
 	int index, mask_index;
@@ -716,10 +667,10 @@
 	VM_BUG_ON(radix_enabled());
 
 	if (slice_addr_is_low(addr)) {
-		psizes = mm->context.low_slices_psize;
+		psizes = mm_ctx_low_slices(&mm->context);
 		index = GET_LOW_SLICE_INDEX(addr);
 	} else {
-		psizes = mm->context.high_slices_psize;
+		psizes = mm_ctx_high_slices(&mm->context);
 		index = GET_HIGH_SLICE_INDEX(addr);
 	}
 	mask_index = index & 0x1;
@@ -740,31 +691,40 @@
 	 * case of fork it is just inherited from the mm being
 	 * duplicated.
	 */
-#ifdef CONFIG_PPC64
-	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
-#else
-	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
-#endif
-
-	mm->context.user_psize = psize;
+	mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
+	mm_ctx_set_user_psize(&mm->context, psize);
 
 	/*
 	 * Set all slice psizes to the default.
 	 */
-	lpsizes = mm->context.low_slices_psize;
+	lpsizes = mm_ctx_low_slices(&mm->context);
 	memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);
 
-	hpsizes = mm->context.high_slices_psize;
+	hpsizes = mm_ctx_high_slices(&mm->context);
 	memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);
 
 	/*
 	 * Slice mask cache starts zeroed, fill the default size cache.
 	 */
-	mask = slice_mask_for_size(mm, psize);
+	mask = slice_mask_for_size(&mm->context, psize);
 	mask->low_slices = ~0UL;
 	if (SLICE_NUM_HIGH)
 		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
 }
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void slice_setup_new_exec(void)
+{
+	struct mm_struct *mm = current->mm;
+
+	slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);
+
+	if (!is_32bit_task())
+		return;
+
+	mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
+}
+#endif
 
 void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
 			   unsigned long len, unsigned int psize)
@@ -801,22 +761,21 @@
 				  unsigned long len)
 {
 	const struct slice_mask *maskp;
-	unsigned int psize = mm->context.user_psize;
+	unsigned int psize = mm_ctx_user_psize(&mm->context);
 
 	VM_BUG_ON(radix_enabled());
 
-	maskp = slice_mask_for_size(mm, psize);
-#ifdef CONFIG_PPC_64K_PAGES
+	maskp = slice_mask_for_size(&mm->context, psize);
+
 	/* We need to account for 4k slices too */
-	if (psize == MMU_PAGE_64K) {
+	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
 		const struct slice_mask *compat_maskp;
 		struct slice_mask available;
 
-		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
+		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
 		slice_or_mask(&available, maskp, compat_maskp);
 		return !slice_check_range_fits(mm, &available, addr, len);
 	}
-#endif
 
 	return !slice_check_range_fits(mm, maskp, addr, len);
 }
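
Note on the recurring pattern above: most of this diff is one mechanical conversion. Direct field accesses on mm->context (slb_addr_limit, user_psize, low_slices_psize, high_slices_psize) become mm_ctx_*() accessor calls that take an mm_context_t *, and slice_mask_for_size() likewise takes the context instead of the whole mm_struct. The same indirection explains why the open-coded slice_mask_for_size() bodies are deleted from this file: the per-platform versions presumably move next to the mm_context_t definitions in the MMU headers, where they can dereference the context directly. A minimal sketch of the accessor shape this implies, assuming the Book3S-64 arrangement where hash-MMU state hangs off a hash_context pointer (the real definitions live in the arch headers, not in this file, and the 8xx variant differs):

	/* Sketch only; assumes hash-specific fields sit behind
	 * ctx->hash_context as on Book3S-64.
	 */
	static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
	{
		return ctx->hash_context->slb_addr_limit;
	}

	static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx,
						     unsigned long limit)
	{
		ctx->hash_context->slb_addr_limit = limit;
	}

The remaining changes track upstream conventions rather than altering behaviour: the GPL notice collapses into an SPDX tag, #ifdef CONFIG_PPC_64K_PAGES blocks become IS_ENABLED(CONFIG_PPC_64K_PAGES) conditions (the constant folds away at compile time while keeping both branches visible to the compiler), and slice_addr_is_low()/get_slice_psize() gain notrace, presumably because they sit on paths where function tracing is unsafe.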