2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/tools/testing/selftests/rseq/rseq.h
@@ -16,8 +16,9 @@
 #include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <sched.h>
-#include <linux/rseq.h>
+#include <stddef.h>
+#include "rseq-abi.h"
+#include "compiler.h"
 
 /*
  * Empty code injection macros, override when testing.
@@ -44,7 +45,20 @@
 #define RSEQ_INJECT_FAILED
 #endif
 
-extern __thread volatile struct rseq __rseq_abi;
+#include "rseq-thread-pointer.h"
+
+/* Offset from the thread pointer to the rseq area. */
+extern ptrdiff_t rseq_offset;
+/* Size of the registered rseq area. 0 if the registration was
+   unsuccessful. */
+extern unsigned int rseq_size;
+/* Flags used during rseq registration. */
+extern unsigned int rseq_flags;
+
+static inline struct rseq_abi *rseq_get_abi(void)
+{
+	return (struct rseq_abi *) ((uintptr_t) rseq_thread_pointer() + rseq_offset);
+}
 
 #define rseq_likely(x)		__builtin_expect(!!(x), 1)
 #define rseq_unlikely(x)	__builtin_expect(!!(x), 0)
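
Aside (not part of the patch): with this change the rseq area is no longer reached through a __rseq_abi TLS variable but at a fixed offset from the architecture thread pointer, matching the glibc-2.35 rseq ABI. A minimal usage sketch, assuming rseq_register_current_thread() from the selftests' rseq.c has already succeeded on the calling thread; print_current_cpu is an illustrative name:

    #include <stdio.h>

    #include "rseq.h"   /* the header patched above */

    static void print_current_cpu(void)
    {
            /* Thread pointer + rseq_offset locates this thread's rseq area. */
            struct rseq_abi *rs = rseq_get_abi();

            printf("cpu_id: %d\n", (int) RSEQ_ACCESS_ONCE(rs->cpu_id));
    }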
@@ -108,7 +122,7 @@
  */
 static inline int32_t rseq_current_cpu_raw(void)
 {
-	return RSEQ_ACCESS_ONCE(__rseq_abi.cpu_id);
+	return RSEQ_ACCESS_ONCE(rseq_get_abi()->cpu_id);
 }
 
 /*
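
Aside (an assumption for illustration, not from the patch): rseq_current_cpu_raw() performs no fallback, so a negative cpu_id means rseq is unregistered on this thread. The sched_getcpu() fallback below broadly mirrors what the header's rseq_current_cpu() does; current_cpu_with_fallback is an illustrative name:

    #include <sched.h>

    #include "rseq.h"

    static int current_cpu_with_fallback(void)
    {
            int32_t cpu = rseq_current_cpu_raw();

            /* Negative cpu_id: rseq not registered; ask the kernel instead. */
            return cpu >= 0 ? (int) cpu : sched_getcpu();
    }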
@@ -124,7 +138,7 @@
  */
 static inline uint32_t rseq_cpu_start(void)
 {
-	return RSEQ_ACCESS_ONCE(__rseq_abi.cpu_id_start);
+	return RSEQ_ACCESS_ONCE(rseq_get_abi()->cpu_id_start);
 }
 
 static inline uint32_t rseq_current_cpu(void)
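
Aside (a hedged sketch, not part of the patch): rseq_cpu_start() is only a speculative hint; it is safe to use precisely because the rseq commit aborts if the thread migrated off that CPU first. The sketch assumes the classic rseq_addv() helper from the arch-specific selftest headers with its retry-on-nonzero convention; per_cpu_count and NR_CPUS_MAX are made-up names:

    #include <stdint.h>

    #include "rseq.h"

    #define NR_CPUS_MAX 512   /* illustrative bound */

    static intptr_t per_cpu_count[NR_CPUS_MAX];

    static void percpu_inc(void)
    {
            int ret;

            do {
                    /* Cheap speculative CPU hint, no syscall. */
                    int cpu = (int) rseq_cpu_start();

                    /* Nonzero ret: aborted (e.g. migration); retry. */
                    ret = rseq_addv(&per_cpu_count[cpu], 1, cpu);
            } while (rseq_unlikely(ret));
    }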
@@ -139,21 +153,19 @@
 
 static inline void rseq_clear_rseq_cs(void)
 {
-#ifdef __LP64__
-	__rseq_abi.rseq_cs.ptr = 0;
-#else
-	__rseq_abi.rseq_cs.ptr.ptr32 = 0;
-#endif
+	RSEQ_WRITE_ONCE(rseq_get_abi()->rseq_cs.arch.ptr, 0);
 }
 
 /*
  * rseq_prepare_unload() should be invoked by each thread executing a rseq
  * critical section at least once between their last critical section and
- * library unload of the library defining the rseq critical section
- * (struct rseq_cs). This also applies to use of rseq in code generated by
- * JIT: rseq_prepare_unload() should be invoked at least once by each
- * thread executing a rseq critical section before reclaim of the memory
- * holding the struct rseq_cs.
+ * library unload of the library defining the rseq critical section (struct
+ * rseq_cs) or the code referred to by the struct rseq_cs start_ip and
+ * post_commit_offset fields. This also applies to use of rseq in code
+ * generated by JIT: rseq_prepare_unload() should be invoked at least once by
+ * each thread executing a rseq critical section before reclaim of the memory
+ * holding the struct rseq_cs or reclaim of the code pointed to by struct
+ * rseq_cs start_ip and post_commit_offset fields.
  */
 static inline void rseq_prepare_unload(void)
 {
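
Aside (an illustrative pattern, not part of the patch): the expanded comment above now also covers reclaim of JIT-generated code. A sketch of honoring that protocol; jit_buf and jit_len are hypothetical names, and every thread that executed rseq critical sections in the region must make this call before the memory is freed:

    #include <stddef.h>
    #include <sys/mman.h>

    #include "rseq.h"

    static void thread_quiesce_and_unmap(void *jit_buf, size_t jit_len)
    {
            /*
             * Clear this thread's rseq_cs pointer so the kernel cannot
             * dereference a struct rseq_cs (or abort into code) located
             * in the region we are about to free.
             */
            rseq_prepare_unload();

            /* Hypothetical reclaim of the JIT region (last thread only). */
            munmap(jit_buf, jit_len);
    }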