forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/arch/arm64/kernel/vdso.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * VDSO implementations.
  *
  * Copyright (C) 2012 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  *
  * Author: Will Deacon <will.deacon@arm.com>
  */
@@ -29,6 +18,7 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
+#include <linux/time_namespace.h>
 #include <linux/timekeeper_internal.h>
 #include <linux/vmalloc.h>
 #include <vdso/datapage.h>
@@ -40,24 +30,20 @@
 #include <asm/vdso.h>
 
 extern char vdso_start[], vdso_end[];
-#ifdef CONFIG_COMPAT_VDSO
 extern char vdso32_start[], vdso32_end[];
-#endif /* CONFIG_COMPAT_VDSO */
 
-/* vdso_lookup arch_index */
-enum arch_vdso_type {
-        ARM64_VDSO = 0,
-#ifdef CONFIG_COMPAT_VDSO
-        ARM64_VDSO32 = 1,
-#endif /* CONFIG_COMPAT_VDSO */
+enum vdso_abi {
+        VDSO_ABI_AA64,
+        VDSO_ABI_AA32,
 };
-#ifdef CONFIG_COMPAT_VDSO
-#define VDSO_TYPES (ARM64_VDSO32 + 1)
-#else
-#define VDSO_TYPES (ARM64_VDSO + 1)
-#endif /* CONFIG_COMPAT_VDSO */
 
-struct __vdso_abi {
+enum vvar_pages {
+        VVAR_DATA_PAGE_OFFSET,
+        VVAR_TIMENS_PAGE_OFFSET,
+        VVAR_NR_PAGES,
+};
+
+struct vdso_abi_info {
         const char *name;
         const char *vdso_code_start;
         const char *vdso_code_end;
@@ -68,14 +54,14 @@
         struct vm_special_mapping *cm;
 };
 
-static struct __vdso_abi vdso_lookup[VDSO_TYPES] __ro_after_init = {
-        {
+static struct vdso_abi_info vdso_info[] __ro_after_init = {
+        [VDSO_ABI_AA64] = {
                 .name = "vdso",
                 .vdso_code_start = vdso_start,
                 .vdso_code_end = vdso_end,
         },
 #ifdef CONFIG_COMPAT_VDSO
-        {
+        [VDSO_ABI_AA32] = {
                 .name = "vdso32",
                 .vdso_code_start = vdso32_start,
                 .vdso_code_end = vdso32_end,
@@ -92,13 +78,13 @@
 } vdso_data_store __page_aligned_data;
 struct vdso_data *vdso_data = vdso_data_store.data;
 
-static int __vdso_remap(enum arch_vdso_type arch_index,
+static int __vdso_remap(enum vdso_abi abi,
                         const struct vm_special_mapping *sm,
                         struct vm_area_struct *new_vma)
 {
         unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
-        unsigned long vdso_size = vdso_lookup[arch_index].vdso_code_end -
-                                  vdso_lookup[arch_index].vdso_code_start;
+        unsigned long vdso_size = vdso_info[abi].vdso_code_end -
+                                  vdso_info[abi].vdso_code_start;
 
         if (vdso_size != new_size)
                 return -EINVAL;
@@ -108,56 +94,156 @@
         return 0;
 }
 
-static int __vdso_init(enum arch_vdso_type arch_index)
+static int __vdso_init(enum vdso_abi abi)
 {
         int i;
         struct page **vdso_pagelist;
         unsigned long pfn;
 
-        if (memcmp(vdso_lookup[arch_index].vdso_code_start, "\177ELF", 4)) {
+        if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
                 pr_err("vDSO is not a valid ELF object!\n");
                 return -EINVAL;
         }
 
-        vdso_lookup[arch_index].vdso_pages = (
-                vdso_lookup[arch_index].vdso_code_end -
-                vdso_lookup[arch_index].vdso_code_start) >>
+        vdso_info[abi].vdso_pages = (
+                vdso_info[abi].vdso_code_end -
+                vdso_info[abi].vdso_code_start) >>
                 PAGE_SHIFT;
 
-        /* Allocate the vDSO pagelist, plus a page for the data. */
-        vdso_pagelist = kcalloc(vdso_lookup[arch_index].vdso_pages + 1,
+        vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
                                 sizeof(struct page *),
                                 GFP_KERNEL);
         if (vdso_pagelist == NULL)
                 return -ENOMEM;
 
-        /* Grab the vDSO data page. */
-        vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
-
-
         /* Grab the vDSO code pages. */
-        pfn = sym_to_pfn(vdso_lookup[arch_index].vdso_code_start);
+        pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);
 
-        for (i = 0; i < vdso_lookup[arch_index].vdso_pages; i++)
-                vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
+        for (i = 0; i < vdso_info[abi].vdso_pages; i++)
+                vdso_pagelist[i] = pfn_to_page(pfn + i);
 
-        vdso_lookup[arch_index].dm->pages = &vdso_pagelist[0];
-        vdso_lookup[arch_index].cm->pages = &vdso_pagelist[1];
+        vdso_info[abi].cm->pages = vdso_pagelist;
 
         return 0;
 }
 
-static int __setup_additional_pages(enum arch_vdso_type arch_index,
+#ifdef CONFIG_TIME_NS
+struct vdso_data *arch_get_vdso_data(void *vvar_page)
+{
+        return (struct vdso_data *)(vvar_page);
+}
+
+/*
+ * The vvar mapping contains data for a specific time namespace, so when a task
+ * changes namespace we must unmap its vvar data for the old namespace.
+ * Subsequent faults will map in data for the new namespace.
+ *
+ * For more details see timens_setup_vdso_data().
+ */
+int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
+{
+        struct mm_struct *mm = task->mm;
+        struct vm_area_struct *vma;
+
+        mmap_read_lock(mm);
+
+        for (vma = mm->mmap; vma; vma = vma->vm_next) {
+                unsigned long size = vma->vm_end - vma->vm_start;
+
+                if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
+                        zap_page_range(vma, vma->vm_start, size);
+#ifdef CONFIG_COMPAT_VDSO
+                if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
+                        zap_page_range(vma, vma->vm_start, size);
+#endif
+        }
+
+        mmap_read_unlock(mm);
+        return 0;
+}
+
+static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
+{
+        if (likely(vma->vm_mm == current->mm))
+                return current->nsproxy->time_ns->vvar_page;
+
+        /*
+         * VM_PFNMAP | VM_IO protect .fault() handler from being called
+         * through interfaces like /proc/$pid/mem or
+         * process_vm_{readv,writev}() as long as there's no .access()
+         * in special_mapping_vmops.
+         * For more details check_vma_flags() and __access_remote_vm()
+         */
+        WARN(1, "vvar_page accessed remotely");
+
+        return NULL;
+}
+#else
+static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
+{
+        return NULL;
+}
+#endif
+
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+                             struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+        struct page *timens_page = find_timens_vvar_page(vma);
+        unsigned long pfn;
+
+        switch (vmf->pgoff) {
+        case VVAR_DATA_PAGE_OFFSET:
+                if (timens_page)
+                        pfn = page_to_pfn(timens_page);
+                else
+                        pfn = sym_to_pfn(vdso_data);
+                break;
+#ifdef CONFIG_TIME_NS
+        case VVAR_TIMENS_PAGE_OFFSET:
+                /*
+                 * If a task belongs to a time namespace then a namespace
+                 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
+                 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
+                 * offset.
+                 * See also the comment near timens_setup_vdso_data().
+                 */
+                if (!timens_page)
+                        return VM_FAULT_SIGBUS;
+                pfn = sym_to_pfn(vdso_data);
+                break;
+#endif /* CONFIG_TIME_NS */
+        default:
+                return VM_FAULT_SIGBUS;
+        }
+
+        return vmf_insert_pfn(vma, vmf->address, pfn);
+}
+
+static int vvar_mremap(const struct vm_special_mapping *sm,
+                       struct vm_area_struct *new_vma)
+{
+        unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+
+        if (new_size != VVAR_NR_PAGES * PAGE_SIZE)
+                return -EINVAL;
+
+        return 0;
+}
+
+static int __setup_additional_pages(enum vdso_abi abi,
                                     struct mm_struct *mm,
                                     struct linux_binprm *bprm,
                                     int uses_interp)
 {
         unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+        unsigned long gp_flags = 0;
         void *ret;
 
-        vdso_text_len = vdso_lookup[arch_index].vdso_pages << PAGE_SHIFT;
+        BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
+
+        vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
         /* Be sure to map the data page */
-        vdso_mapping_len = vdso_text_len + PAGE_SIZE;
+        vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;
 
         vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
         if (IS_ERR_VALUE(vdso_base)) {
@@ -165,18 +251,21 @@
                 goto up_fail;
         }
 
-        ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
-                                       VM_READ|VM_MAYREAD,
-                                       vdso_lookup[arch_index].dm);
+        ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
+                                       VM_READ|VM_MAYREAD|VM_PFNMAP,
+                                       vdso_info[abi].dm);
         if (IS_ERR(ret))
                 goto up_fail;
 
-        vdso_base += PAGE_SIZE;
+        if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
+                gp_flags = VM_ARM64_BTI;
+
+        vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
         mm->context.vdso = (void *)vdso_base;
         ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
-                                       VM_READ|VM_EXEC|
+                                       VM_READ|VM_EXEC|gp_flags|
                                        VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-                                       vdso_lookup[arch_index].cm);
+                                       vdso_info[abi].cm);
         if (IS_ERR(ret))
                 goto up_fail;
 
@@ -191,52 +280,40 @@
 /*
  * Create and map the vectors page for AArch32 tasks.
  */
-#ifdef CONFIG_COMPAT_VDSO
 static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
                                struct vm_area_struct *new_vma)
 {
-        return __vdso_remap(ARM64_VDSO32, sm, new_vma);
+        return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
 }
-#endif /* CONFIG_COMPAT_VDSO */
 
-/*
- * aarch32_vdso_pages:
- *   0 - kuser helpers
- *   1 - sigreturn code
- * or (CONFIG_COMPAT_VDSO):
- *   0 - kuser helpers
- *   1 - vdso data
- *   2 - vdso code
- */
-#define C_VECTORS 0
-#ifdef CONFIG_COMPAT_VDSO
-#define C_VVAR 1
-#define C_VDSO 2
-#define C_PAGES (C_VDSO + 1)
-#else
-#define C_SIGPAGE 1
-#define C_PAGES (C_SIGPAGE + 1)
-#endif /* CONFIG_COMPAT_VDSO */
-static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init;
-static struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
-        {
+enum aarch32_map {
+        AA32_MAP_VECTORS, /* kuser helpers */
+        AA32_MAP_SIGPAGE,
+        AA32_MAP_VVAR,
+        AA32_MAP_VDSO,
+};
+
+static struct page *aarch32_vectors_page __ro_after_init;
+static struct page *aarch32_sig_page __ro_after_init;
+
+static struct vm_special_mapping aarch32_vdso_maps[] = {
+        [AA32_MAP_VECTORS] = {
                 .name = "[vectors]", /* ABI */
-                .pages = &aarch32_vdso_pages[C_VECTORS],
+                .pages = &aarch32_vectors_page,
         },
-#ifdef CONFIG_COMPAT_VDSO
-        {
+        [AA32_MAP_SIGPAGE] = {
+                .name = "[sigpage]", /* ABI */
+                .pages = &aarch32_sig_page,
+        },
+        [AA32_MAP_VVAR] = {
                 .name = "[vvar]",
+                .fault = vvar_fault,
+                .mremap = vvar_mremap,
         },
-        {
+        [AA32_MAP_VDSO] = {
                 .name = "[vdso]",
                 .mremap = aarch32_vdso_mremap,
         },
-#else
-        {
-                .name = "[sigpage]", /* ABI */
-                .pages = &aarch32_vdso_pages[C_SIGPAGE],
-        },
-#endif /* CONFIG_COMPAT_VDSO */
 };
 
 static int aarch32_alloc_kuser_vdso_page(void)
@@ -254,52 +331,52 @@
 
         memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
                kuser_sz);
-        aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page);
-        flush_dcache_page(aarch32_vdso_pages[C_VECTORS]);
+        aarch32_vectors_page = virt_to_page(vdso_page);
+        flush_dcache_page(aarch32_vectors_page);
         return 0;
 }
 
-#ifdef CONFIG_COMPAT_VDSO
-static int __aarch32_alloc_vdso_pages(void)
-{
-        int ret;
-
-        vdso_lookup[ARM64_VDSO32].dm = &aarch32_vdso_spec[C_VVAR];
-        vdso_lookup[ARM64_VDSO32].cm = &aarch32_vdso_spec[C_VDSO];
-
-        ret = __vdso_init(ARM64_VDSO32);
-        if (ret)
-                return ret;
-
-        return aarch32_alloc_kuser_vdso_page();
-}
-#else
-static int __aarch32_alloc_vdso_pages(void)
+static int aarch32_alloc_sigpage(void)
 {
         extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
         int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
         unsigned long sigpage;
-        int ret;
 
         sigpage = get_zeroed_page(GFP_ATOMIC);
         if (!sigpage)
                 return -ENOMEM;
 
         memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
-        aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(sigpage);
-        flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]);
-
-        ret = aarch32_alloc_kuser_vdso_page();
-        if (ret)
-                free_page(sigpage);
-
-        return ret;
+        aarch32_sig_page = virt_to_page(sigpage);
+        flush_dcache_page(aarch32_sig_page);
+        return 0;
 }
-#endif /* CONFIG_COMPAT_VDSO */
+
+static int __aarch32_alloc_vdso_pages(void)
+{
+
+        if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
+                return 0;
+
+        vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
+        vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
+
+        return __vdso_init(VDSO_ABI_AA32);
+}
 
 static int __init aarch32_alloc_vdso_pages(void)
 {
-        return __aarch32_alloc_vdso_pages();
+        int ret;
+
+        ret = __aarch32_alloc_vdso_pages();
+        if (ret)
+                return ret;
+
+        ret = aarch32_alloc_sigpage();
+        if (ret)
+                return ret;
+
+        return aarch32_alloc_kuser_vdso_page();
 }
 arch_initcall(aarch32_alloc_vdso_pages);
 
@@ -317,12 +394,11 @@
         ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
                                        VM_READ | VM_EXEC |
                                        VM_MAYREAD | VM_MAYEXEC,
-                                       &aarch32_vdso_spec[C_VECTORS]);
+                                       &aarch32_vdso_maps[AA32_MAP_VECTORS]);
 
         return PTR_ERR_OR_ZERO(ret);
 }
 
-#ifndef CONFIG_COMPAT_VDSO
 static int aarch32_sigreturn_setup(struct mm_struct *mm)
 {
         unsigned long addr;
@@ -341,40 +417,38 @@
         ret = _install_special_mapping(mm, addr, PAGE_SIZE,
                                        VM_READ | VM_EXEC | VM_MAYREAD |
                                        VM_MAYWRITE | VM_MAYEXEC,
-                                       &aarch32_vdso_spec[C_SIGPAGE]);
+                                       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
         if (IS_ERR(ret))
                 goto out;
 
-        mm->context.vdso = (void *)addr;
+        mm->context.sigpage = (void *)addr;
 
 out:
         return PTR_ERR_OR_ZERO(ret);
 }
-#endif /* !CONFIG_COMPAT_VDSO */
 
 int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
         struct mm_struct *mm = current->mm;
         int ret;
 
-        if (down_write_killable(&mm->mmap_sem))
+        if (mmap_write_lock_killable(mm))
                 return -EINTR;
 
         ret = aarch32_kuser_helpers_setup(mm);
         if (ret)
                 goto out;
 
-#ifdef CONFIG_COMPAT_VDSO
-        ret = __setup_additional_pages(ARM64_VDSO32,
-                                       mm,
-                                       bprm,
-                                       uses_interp);
-#else
-        ret = aarch32_sigreturn_setup(mm);
-#endif /* CONFIG_COMPAT_VDSO */
+        if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
+                ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
+                                               uses_interp);
+                if (ret)
+                        goto out;
+        }
 
+        ret = aarch32_sigreturn_setup(mm);
 out:
-        up_write(&mm->mmap_sem);
+        mmap_write_unlock(mm);
         return ret;
 }
 #endif /* CONFIG_COMPAT */
@@ -382,22 +456,21 @@
 static int vdso_mremap(const struct vm_special_mapping *sm,
                        struct vm_area_struct *new_vma)
 {
-        return __vdso_remap(ARM64_VDSO, sm, new_vma);
+        return __vdso_remap(VDSO_ABI_AA64, sm, new_vma);
 }
 
-/*
- * aarch64_vdso_pages:
- *   0 - vvar
- *   1 - vdso
- */
-#define A_VVAR 0
-#define A_VDSO 1
-#define A_PAGES (A_VDSO + 1)
-static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = {
-        {
+enum aarch64_map {
+        AA64_MAP_VVAR,
+        AA64_MAP_VDSO,
+};
+
+static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
+        [AA64_MAP_VVAR] = {
                 .name = "[vvar]",
+                .fault = vvar_fault,
+                .mremap = vvar_mremap,
         },
-        {
+        [AA64_MAP_VDSO] = {
                 .name = "[vdso]",
                 .mremap = vdso_mremap,
         },
@@ -405,28 +478,23 @@
 
 static int __init vdso_init(void)
 {
-        vdso_lookup[ARM64_VDSO].dm = &vdso_spec[A_VVAR];
-        vdso_lookup[ARM64_VDSO].cm = &vdso_spec[A_VDSO];
+        vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
+        vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];
 
-        return __vdso_init(ARM64_VDSO);
+        return __vdso_init(VDSO_ABI_AA64);
 }
 arch_initcall(vdso_init);
 
-int arch_setup_additional_pages(struct linux_binprm *bprm,
-                                int uses_interp)
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
         struct mm_struct *mm = current->mm;
         int ret;
 
-        if (down_write_killable(&mm->mmap_sem))
+        if (mmap_write_lock_killable(mm))
                 return -EINTR;
 
-        ret = __setup_additional_pages(ARM64_VDSO,
-                                       mm,
-                                       bprm,
-                                       uses_interp);
-
-        up_write(&mm->mmap_sem);
+        ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
+        mmap_write_unlock(mm);
 
         return ret;
 }
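Context for the "[vvar]"/"[vdso]" mappings this patch installs (not part of the commit above): userspace reaches them through the vDSO fast path, where libc dispatches clock_gettime() to the code in the "[vdso]" text mapping, which reads the vvar data page instead of trapping into the kernel. A minimal, illustrative userspace sketch, using only standard glibc interfaces (getauxval(), AT_SYSINFO_EHDR, clock_gettime()); nothing in it is defined by the patch itself:

/*
 * Illustrative only: print the base of the process's "[vdso]" mapping (as
 * reported in the auxiliary vector) and read the monotonic clock, which is
 * normally serviced from the vDSO without a syscall when it is mapped.
 */
#include <stdio.h>
#include <time.h>
#include <sys/auxv.h>

int main(void)
{
        struct timespec ts;

        /* Base address of the "[vdso]" mapping, passed in the auxv. */
        unsigned long vdso_base = getauxval(AT_SYSINFO_EHDR);

        printf("[vdso] mapped at 0x%lx\n", vdso_base);

        /* Fast path: resolved via the vDSO when available. */
        if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
                printf("monotonic: %lld.%09ld\n",
                       (long long)ts.tv_sec, ts.tv_nsec);

        return 0;
}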