From 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 22 Oct 2024 10:36:11 +0000
Subject: [PATCH] Switch 4G dial-up to QMI; quectel-CM must be run in the background on the system

---
 kernel/arch/arm64/include/uapi/asm/sigcontext.h |   74 ++++++++++++++++++++++---------------
 1 file changed, 44 insertions(+), 30 deletions(-)

diff --git a/kernel/arch/arm64/include/uapi/asm/sigcontext.h b/kernel/arch/arm64/include/uapi/asm/sigcontext.h
index dca8f8b..0c796c7 100644
--- a/kernel/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/kernel/arch/arm64/include/uapi/asm/sigcontext.h
@@ -77,6 +77,15 @@
 	__uint128_t vregs[32];
 };
 
+/*
+ * Note: similarly to all other integer fields, each V-register is stored in an
+ * endianness-dependent format, with the byte at offset i from the start of the
+ * in-memory representation of the register value containing
+ *
+ *    bits [(7 + 8 * i) : (8 * i)] of the register on little-endian hosts; or
+ *    bits [(127 - 8 * i) : (120 - 8 * i)] on big-endian hosts.
+ */
+
 /* ESR_EL1 context */
 #define ESR_MAGIC	0x45535201
 
@@ -130,29 +139,30 @@
 
 #endif /* !__ASSEMBLY__ */
 
+#include <asm/sve_context.h>
+
 /*
  * The SVE architecture leaves space for future expansion of the
  * vector length beyond its initial architectural limit of 2048 bits
  * (16 quadwords).
  *
- * See linux/Documentation/arm64/sve.txt for a description of the VL/VQ
+ * See linux/Documentation/arm64/sve.rst for a description of the VL/VQ
  * terminology.
  */
-#define SVE_VQ_BYTES		16	/* number of bytes per quadword */
+#define SVE_VQ_BYTES		__SVE_VQ_BYTES	/* bytes per quadword */
 
-#define SVE_VQ_MIN		1
-#define SVE_VQ_MAX		512
+#define SVE_VQ_MIN		__SVE_VQ_MIN
+#define SVE_VQ_MAX		__SVE_VQ_MAX
 
-#define SVE_VL_MIN		(SVE_VQ_MIN * SVE_VQ_BYTES)
-#define SVE_VL_MAX		(SVE_VQ_MAX * SVE_VQ_BYTES)
+#define SVE_VL_MIN		__SVE_VL_MIN
+#define SVE_VL_MAX		__SVE_VL_MAX
 
-#define SVE_NUM_ZREGS		32
-#define SVE_NUM_PREGS		16
+#define SVE_NUM_ZREGS		__SVE_NUM_ZREGS
+#define SVE_NUM_PREGS		__SVE_NUM_PREGS
 
-#define sve_vl_valid(vl) \
-	((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
-#define sve_vq_from_vl(vl)	((vl) / SVE_VQ_BYTES)
-#define sve_vl_from_vq(vq)	((vq) * SVE_VQ_BYTES)
+#define sve_vl_valid(vl)	__sve_vl_valid(vl)
+#define sve_vq_from_vl(vl)	__sve_vq_from_vl(vl)
+#define sve_vl_from_vq(vq)	__sve_vl_from_vq(vq)
 
 /*
  * If the SVE registers are currently live for the thread at signal delivery,
@@ -169,7 +179,7 @@
  * The same convention applies when returning from a signal: a caller
  * will need to remove or resize the sve_context block if it wants to
  * make the SVE registers live when they were previously non-live or
- * vice-versa.  This may require the the caller to allocate fresh
+ * vice-versa.  This may require the caller to allocate fresh
  * memory and/or move other context blocks in the signal frame.
  *
  * Changing the vector length during signal return is not permitted:
@@ -203,36 +213,40 @@
  *	FFR	uint16_t[vq]			first-fault status register
  *
  * Additional data might be appended in the future.
+ *
+ * Unlike vregs[] in fpsimd_context, each SVE scalable register (Z-, P- or FFR)
+ * is encoded in memory in an endianness-invariant format, with the byte at
+ * offset i from the start of the in-memory representation containing bits
+ * [(7 + 8 * i) : (8 * i)] of the register value.
  */
 
-#define SVE_SIG_ZREG_SIZE(vq)	((__u32)(vq) * SVE_VQ_BYTES)
-#define SVE_SIG_PREG_SIZE(vq)	((__u32)(vq) * (SVE_VQ_BYTES / 8))
-#define SVE_SIG_FFR_SIZE(vq)	SVE_SIG_PREG_SIZE(vq)
+#define SVE_SIG_ZREG_SIZE(vq)	__SVE_ZREG_SIZE(vq)
+#define SVE_SIG_PREG_SIZE(vq)	__SVE_PREG_SIZE(vq)
+#define SVE_SIG_FFR_SIZE(vq)	__SVE_FFR_SIZE(vq)
 
 #define SVE_SIG_REGS_OFFSET					\
-	((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1))	\
-		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+	((sizeof(struct sve_context) + (__SVE_VQ_BYTES - 1))	\
+		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 
-#define SVE_SIG_ZREGS_OFFSET	SVE_SIG_REGS_OFFSET
+#define SVE_SIG_ZREGS_OFFSET \
+		(SVE_SIG_REGS_OFFSET + __SVE_ZREGS_OFFSET)
 #define SVE_SIG_ZREG_OFFSET(vq, n) \
-	(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n))
-#define SVE_SIG_ZREGS_SIZE(vq) \
-	(SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET)
+		(SVE_SIG_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
+#define SVE_SIG_ZREGS_SIZE(vq) __SVE_ZREGS_SIZE(vq)
 
 #define SVE_SIG_PREGS_OFFSET(vq) \
-	(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq))
+		(SVE_SIG_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
 #define SVE_SIG_PREG_OFFSET(vq, n) \
-	(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n))
-#define SVE_SIG_PREGS_SIZE(vq) \
-	(SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq))
+		(SVE_SIG_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n))
+#define SVE_SIG_PREGS_SIZE(vq) __SVE_PREGS_SIZE(vq)
 
 #define SVE_SIG_FFR_OFFSET(vq) \
-	(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq))
+		(SVE_SIG_REGS_OFFSET + __SVE_FFR_OFFSET(vq))
 
 #define SVE_SIG_REGS_SIZE(vq) \
-	(SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET)
+		(__SVE_FFR_OFFSET(vq) + __SVE_FFR_SIZE(vq))
 
-#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
-
+#define SVE_SIG_CONTEXT_SIZE(vq) \
+		(SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
 
 #endif /* _UAPI__ASM_SIGCONTEXT_H */

--
Gitblit v1.6.2
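
Illustrative note (not part of the patch, placed after the signature so it
does not affect application): a minimal sketch of how a userspace signal
handler could consume the SVE_SIG_* macros and sve_vq_from_vl() that this
change redefines on top of <asm/sve_context.h>.  The helper name find_zreg()
and the walk over uc_mcontext.__reserved[] are assumptions for illustration
only, not something this patch adds.

#include <stddef.h>
#include <signal.h>
#include <ucontext.h>
#include <asm/sigcontext.h>

/*
 * Return a pointer to Z-register n in the signal frame, or NULL if the
 * SVE registers were not live for the interrupted thread.
 */
static void *find_zreg(ucontext_t *uc, unsigned int n)
{
	struct _aarch64_ctx *head =
		(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

	/* Walk the __reserved[] context blocks until the terminator record. */
	while (head->magic) {
		if (head->magic == SVE_MAGIC) {
			struct sve_context *sve = (struct sve_context *)head;
			unsigned int vq = sve_vq_from_vl(sve->vl);

			/*
			 * Register data is only present when the block is
			 * large enough to hold it.
			 */
			if (head->size < SVE_SIG_CONTEXT_SIZE(vq))
				return NULL;

			/*
			 * Z-register data is endianness-invariant: byte i
			 * holds bits [(7 + 8 * i) : (8 * i)] of the register.
			 */
			return (char *)sve + SVE_SIG_ZREG_OFFSET(vq, n);
		}

		head = (struct _aarch64_ctx *)((char *)head + head->size);
	}

	return NULL;
}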