From 830ce1f69238136c0197858242f16cf44e0d6cb9 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 01 Nov 2024 03:09:37 +0000
Subject: [PATCH] ARM: vdso: add private data page and generic MMIO clocksource support
---
kernel/arch/arm/kernel/vdso.c | 51 +++++++++++++++++++++++++++++++++++++++++----------
1 file changed, 41 insertions(+), 10 deletions(-)
diff --git a/kernel/arch/arm/kernel/vdso.c b/kernel/arch/arm/kernel/vdso.c
index fddd08a..557fb35 100644
--- a/kernel/arch/arm/kernel/vdso.c
+++ b/kernel/arch/arm/kernel/vdso.c
@@ -32,7 +32,10 @@
extern char vdso_start[], vdso_end[];
-/* Total number of pages needed for the data and text portions of the VDSO. */
+/*
+ * Total number of pages needed for the data, private and text
+ * portions of the VDSO.
+ */
unsigned int vdso_total_pages __ro_after_init;
/*
@@ -53,8 +56,8 @@
unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
unsigned long vdso_size;
- /* without VVAR page */
- vdso_size = (vdso_total_pages - 1) << PAGE_SHIFT;
+ /* without VVAR and VPRIV pages */
+ vdso_size = (vdso_total_pages - 2) << PAGE_SHIFT;
if (vdso_size != new_size)
return -EINVAL;
@@ -180,8 +183,10 @@
/* If the virtual counter is absent or non-functional we don't
* want programs to incur the slight additional overhead of
* dispatching through the VDSO only to fall back to syscalls.
+ * However, if clocksources supporting generic MMIO access can
+ * be reached via the vDSO, keep this fast path enabled.
*/
- if (!cntvct_ok) {
+ if (!cntvct_ok && !IS_ENABLED(CONFIG_GENERIC_CLOCKSOURCE_VDSO)) {
vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64");
@@ -219,16 +224,26 @@
vdso_text_mapping.pages = vdso_text_pagelist;
- vdso_total_pages = 1; /* for the data/vvar page */
+ vdso_total_pages = 2; /* for the data/vvar and vpriv pages */
vdso_total_pages += text_pages;
cntvct_ok = cntvct_functional();
patch_vdso(vdso_start);
+#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO
+ vdso_data->cs_type_seq = CLOCKSOURCE_VDSO_NONE << 16 | 1;
+#endif
return 0;
}
arch_initcall(vdso_init);
+
+static int install_vpriv(struct mm_struct *mm, unsigned long addr)
+{
+ return mmap_region(NULL, addr, PAGE_SIZE,
+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
+ 0, NULL) != addr ? -EINVAL : 0;
+}
static int install_vvar(struct mm_struct *mm, unsigned long addr)
{
@@ -237,8 +252,13 @@
vma = _install_special_mapping(mm, addr, PAGE_SIZE,
VM_READ | VM_MAYREAD,
&vdso_data_mapping);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- return PTR_ERR_OR_ZERO(vma);
+ if (cache_is_vivt())
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ return vma->vm_start != addr ? -EINVAL : 0;
}
/* assumes mmap_lock is write-locked */
@@ -252,18 +272,29 @@
if (vdso_text_pagelist == NULL)
return;
- if (install_vvar(mm, addr))
+ if (install_vpriv(mm, addr)) {
+ pr_err("cannot map VPRIV at expected address!\n");
return;
+ }
- /* Account for vvar page. */
+ /* Account for the private storage. */
addr += PAGE_SIZE;
- len = (vdso_total_pages - 1) << PAGE_SHIFT;
+ if (install_vvar(mm, addr)) {
+ WARN(1, "cannot map VVAR at expected address!\n");
+ return;
+ }
+
+ /* Account for vvar and vpriv pages. */
+ addr += PAGE_SIZE;
+ len = (vdso_total_pages - 2) << PAGE_SHIFT;
vma = _install_special_mapping(mm, addr, len,
VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
&vdso_text_mapping);
- if (!IS_ERR(vma))
+ if (IS_ERR(vma) || vma->vm_start != addr)
+ WARN(1, "cannot map VDSO at expected address!\n");
+ else
mm->context.vdso = addr;
}
--
Gitblit v1.6.2