2024-10-09 05e59e5fb0064c97a1c10921ecd549f2d4a58565
kernel/arch/s390/kernel/vdso.c
@@ -18,23 +18,17 @@
 #include <linux/user.h>
 #include <linux/elf.h>
 #include <linux/security.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/compat.h>
+#include <linux/binfmts.h>
+#include <vdso/datapage.h>
 #include <asm/asm-offsets.h>
-#include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/sections.h>
 #include <asm/vdso.h>
 #include <asm/facility.h>
-
-#ifdef CONFIG_COMPAT
-extern char vdso32_start, vdso32_end;
-static void *vdso32_kbase = &vdso32_start;
-static unsigned int vdso32_pages;
-static struct page **vdso32_pagelist;
-#endif
 
 extern char vdso64_start, vdso64_end;
 static void *vdso64_kbase = &vdso64_start;
@@ -55,12 +49,6 @@
 
 	vdso_pagelist = vdso64_pagelist;
 	vdso_pages = vdso64_pages;
-#ifdef CONFIG_COMPAT
-	if (vma->vm_mm->context.compat_mm) {
-		vdso_pagelist = vdso32_pagelist;
-		vdso_pages = vdso32_pages;
-	}
-#endif
 
 	if (vmf->pgoff >= vdso_pages)
 		return VM_FAULT_SIGBUS;
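
Editor's note: with the compat branch gone, the fault handler always resolves faults from the 64-bit page array. As a hedged sketch (hypothetical names, not the patched file itself), a page-array-backed special-mapping fault handler has this general shape; my_pagelist and my_pages are stand-ins for vdso64_pagelist/vdso64_pages:

static vm_fault_t my_vdso_fault(const struct vm_special_mapping *sm,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	if (vmf->pgoff >= my_pages)		/* offset past the mapping */
		return VM_FAULT_SIGBUS;
	vmf->page = my_pagelist[vmf->pgoff];	/* back the fault with a page */
	get_page(vmf->page);			/* the core expects a reference */
	return 0;
}
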
@@ -76,10 +64,6 @@
 	unsigned long vdso_pages;
 
 	vdso_pages = vdso64_pages;
-#ifdef CONFIG_COMPAT
-	if (vma->vm_mm->context.compat_mm)
-		vdso_pages = vdso32_pages;
-#endif
 
 	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
 		return -EINVAL;
@@ -97,21 +81,13 @@
 	.mremap = vdso_mremap,
 };
 
-static int __init vdso_setup(char *s)
+static int __init vdso_setup(char *str)
 {
-	unsigned long val;
-	int rc;
+	bool enabled;
 
-	rc = 0;
-	if (strncmp(s, "on", 3) == 0)
-		vdso_enabled = 1;
-	else if (strncmp(s, "off", 4) == 0)
-		vdso_enabled = 0;
-	else {
-		rc = kstrtoul(s, 0, &val);
-		vdso_enabled = rc ? 0 : !!val;
-	}
-	return !rc;
+	if (!kstrtobool(str, &enabled))
+		vdso_enabled = enabled;
+	return 1;
 }
 __setup("vdso=", vdso_setup);
 
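
Editor's note: kstrtobool() subsumes the old strncmp()/kstrtoul() chain; it returns 0 on success and accepts y/n, 1/0 and on/off spellings. One behavioral nit: the old handler returned 0 (unhandled) for an unparsable value, while the new one always returns 1, so a bad "vdso=" argument is now consumed silently. A userspace mimic of the accepted spellings, paraphrased (hedged; details vary by kernel version) from lib/kstrtox.c:

#include <errno.h>
#include <stdbool.h>

/* Userspace mimic of kernel kstrtobool(): the first character decides,
 * with "on"/"off" disambiguated by the second character. */
static int kstrtobool_mimic(const char *s, bool *res)
{
	if (!s)
		return -EINVAL;
	switch (s[0]) {
	case 'y': case 'Y': case '1':
		*res = true;
		return 0;
	case 'n': case 'N': case '0':
		*res = false;
		return 0;
	case 'o': case 'O':
		switch (s[1]) {
		case 'n': case 'N':	/* "on" */
			*res = true;
			return 0;
		case 'f': case 'F':	/* "off" */
			*res = false;
			return 0;
		}
	}
	return -EINVAL;		/* anything else: reject, leave *res alone */
}

int main(void)
{
	bool b;
	/* "on" parses to true, mirroring what "vdso=on" now does */
	return (kstrtobool_mimic("on", &b) == 0 && b) ? 0 : 1;
}
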
@@ -122,34 +98,11 @@
 	struct vdso_data data;
 	u8 page[PAGE_SIZE];
 } vdso_data_store __page_aligned_data;
-struct vdso_data *vdso_data = &vdso_data_store.data;
-
-/*
- * Setup vdso data page.
- */
-static void __init vdso_init_data(struct vdso_data *vd)
-{
-	vd->ectg_available = test_facility(31);
-}
-
+struct vdso_data *vdso_data = (struct vdso_data *)&vdso_data_store.data;
 /*
  * Allocate/free per cpu vdso data.
  */
 #define SEGMENT_ORDER 2
-
-/*
- * The initial vdso_data structure for the boot CPU. Eventually
- * it is replaced with a properly allocated structure in vdso_init.
- * This is necessary because a valid S390_lowcore.vdso_per_cpu_data
- * pointer is required to be able to return from an interrupt or
- * program check. See the exit paths in entry.S.
- */
-struct vdso_data boot_vdso_data __initdata;
-
-void __init vdso_alloc_boot_cpu(struct lowcore *lowcore)
-{
-	lowcore->vdso_per_cpu_data = (unsigned long) &boot_vdso_data;
-}
 
 int vdso_alloc_per_cpu(struct lowcore *lowcore)
 {
216169
217170 if (!vdso_enabled)
218171 return 0;
219
- /*
220
- * Only map the vdso for dynamically linked elf binaries.
221
- */
222
- if (!uses_interp)
172
+
173
+ if (is_compat_task())
223174 return 0;
224175
225176 vdso_pages = vdso64_pages;
226
-#ifdef CONFIG_COMPAT
227
- mm->context.compat_mm = is_compat_task();
228
- if (mm->context.compat_mm)
229
- vdso_pages = vdso32_pages;
230
-#endif
231177 /*
232178 * vDSO has a problem and was disabled, just don't "enable" it for
233179 * the process
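
Editor's note: with the compat_mm bookkeeping gone, 31-bit (compat) processes simply get no vdso; dropping the !uses_interp check also means statically linked 64-bit binaries now get one. On s390 with CONFIG_COMPAT, is_compat_task() reduces to a thread-flag test; a hedged paraphrase (the exact shape varies by kernel version) of arch/s390/include/asm/compat.h:

/* Sketch: an s390 task is "compat" when it runs in 31-bit addressing
 * mode; without CONFIG_COMPAT the generic helper is constant 0. */
static inline int is_compat_task(void)
{
	return test_thread_flag(TIF_31BIT);
}
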
@@ -240,7 +186,7 @@
 	 * it at vdso_base which is the "natural" base for it, but we might
 	 * fail and end up putting it elsewhere.
 	 */
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
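
Editor's note: this hunk (and the up_write() change below) is the v5.8 mmap locking API conversion: mm->mmap_sem was renamed mm->mmap_lock and direct rwsem calls were replaced by wrappers. Paraphrased (hedged; later kernels add lockdep/tracing instrumentation) from include/linux/mmap_lock.h:

/* Thin wrappers over the rw_semaphore formerly reached as mm->mmap_sem. */
static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	return down_write_killable(&mm->mmap_lock);
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
	up_write(&mm->mmap_lock);
}
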
@@ -271,33 +217,13 @@
 	rc = 0;
 
 out_up:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return rc;
 }
 
 static int __init vdso_init(void)
 {
 	int i;
-
-	vdso_init_data(vdso_data);
-#ifdef CONFIG_COMPAT
-	/* Calculate the size of the 32 bit vDSO */
-	vdso32_pages = ((&vdso32_end - &vdso32_start
-			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
-
-	/* Make sure pages are in the correct state */
-	vdso32_pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *),
-				  GFP_KERNEL);
-	BUG_ON(vdso32_pagelist == NULL);
-	for (i = 0; i < vdso32_pages - 1; i++) {
-		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
-		ClearPageReserved(pg);
-		get_page(pg);
-		vdso32_pagelist[i] = pg;
-	}
-	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
-	vdso32_pagelist[vdso32_pages] = NULL;
-#endif
 
 	/* Calculate the size of the 64 bit vDSO */
 	vdso64_pages = ((&vdso64_end - &vdso64_start
@@ -309,7 +235,6 @@
 	BUG_ON(vdso64_pagelist == NULL);
 	for (i = 0; i < vdso64_pages - 1; i++) {
 		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
-		ClearPageReserved(pg);
 		get_page(pg);
 		vdso64_pagelist[i] = pg;
 	}
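
Editor's note: the size calculation above rounds the vdso image up to whole pages and then adds one extra slot for the shared vdso_data page (installed at vdso64_pagelist[vdso64_pages - 1], as the removed 32-bit analogue shows); the loop therefore stops at vdso64_pages - 1. A standalone, hedged check of that arithmetic:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Round the image up to whole pages, plus one page for vdso_data. */
static unsigned long vdso_pages_for(unsigned long image_bytes)
{
	return ((image_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
}

int main(void)
{
	/* an 8192-byte image -> 2 code pages + 1 data page = 3 pages */
	printf("%lu\n", vdso_pages_for(8192));	/* prints 3 */
	return 0;
}
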