2024-05-10 61598093bbdd283a7edc367d900f223070ead8d2
kernel/include/linux/compiler.h
@@ -23,8 +23,8 @@
 #define __branch_check__(x, expect, is_constant) ({            \
            long ______r;                                       \
            static struct ftrace_likely_data                    \
-               __attribute__((__aligned__(4)))                 \
-               __attribute__((section("_ftrace_annotated_branch"))) \
+               __aligned(4)                                     \
+               __section("_ftrace_annotated_branch")            \
            ______f = {                                         \
                .data.func = __func__,                          \
                .data.file = __FILE__,                          \
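
Note: __aligned() and __section() are shorthands from include/linux/compiler_attributes.h; the replacement lines are equivalent to the raw attribute forms they drop. Roughly (a sketch, not quoted verbatim from that header):

    #define __aligned(x)         __attribute__((__aligned__(x)))
    #define __section(section)   __attribute__((__section__(section)))
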
@@ -53,23 +53,24 @@
  * "Define 'is'", Bill Clinton
  * "Define 'if'", Steven Rostedt
  */
-#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
-#define __trace_if(cond) \
-   if (__builtin_constant_p(!!(cond)) ? !!(cond) :              \
-   ({                                                           \
-       int ______r;                                             \
-       static struct ftrace_branch_data                         \
-           __attribute__((__aligned__(4)))                      \
-           __attribute__((section("_ftrace_branch")))           \
-           ______f = {                                          \
-               .func = __func__,                                \
-               .file = __FILE__,                                \
-               .line = __LINE__,                                \
-           };                                                   \
-       ______r = !!(cond);                                      \
-       ______f.miss_hit[______r]++;                             \
-       ______r;                                                 \
-   }))
+#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )
+
+#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))
+
+#define __trace_if_value(cond) ({                       \
+   static struct ftrace_branch_data                     \
+       __aligned(4)                                     \
+       __section("_ftrace_branch")                      \
+       __if_trace = {                                   \
+           .func = __func__,                            \
+           .file = __FILE__,                            \
+           .line = __LINE__,                            \
+       };                                               \
+   (cond) ?                                             \
+       (__if_trace.miss_hit[1]++,1) :                   \
+       (__if_trace.miss_hit[0]++,0);                    \
+})
+
 #endif /* CONFIG_PROFILE_ALL_BRANCHES */

 #else
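
With CONFIG_PROFILE_ALL_BRANCHES, the rewritten if() above routes every non-constant condition through __trace_if_value(), which keeps a per-site hit/miss count while passing the condition's value through unchanged; __trace_if_var() skips the record entirely when __builtin_constant_p() proves the condition constant. A stand-alone sketch of that counting idea (plain GNU C with simplified names, not kernel code):

    struct branch_record { unsigned long miss_hit[2]; };

    #define COUNTED_COND(cond) ({                       \
        static struct branch_record __rec;              \
        (cond) ? (__rec.miss_hit[1]++, 1)               \
               : (__rec.miss_hit[0]++, 0);              \
    })

    /* Usage: behaves like `if (x > 0)` but tallies both outcomes. */
    /* if (COUNTED_COND(x > 0)) { ... } */
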
@@ -130,62 +131,13 @@
        ".long 999b - .\n\t"                                    \
        ".popsection\n\t"

-#ifdef CONFIG_DEBUG_ENTRY
-/* Begin/end of an instrumentation safe region */
-#define instrumentation_begin() ({                              \
-   asm volatile("%c0:\n\t"                                      \
-            ".pushsection .discard.instr_begin\n\t"             \
-            ".long %c0b - .\n\t"                                \
-            ".popsection\n\t" : : "i" (__COUNTER__));           \
-})
-
-/*
- * Because instrumentation_{begin,end}() can nest, objtool validation considers
- * _begin() a +1 and _end() a -1 and computes a sum over the instructions.
- * When the value is greater than 0, we consider instrumentation allowed.
- *
- * There is a problem with code like:
- *
- * noinstr void foo()
- * {
- *     instrumentation_begin();
- *     ...
- *     if (cond) {
- *         instrumentation_begin();
- *         ...
- *         instrumentation_end();
- *     }
- *     bar();
- *     instrumentation_end();
- * }
- *
- * If instrumentation_end() would be an empty label, like all the other
- * annotations, the inner _end(), which is at the end of a conditional block,
- * would land on the instruction after the block.
- *
- * If we then consider the sum of the !cond path, we'll see that the call to
- * bar() is with a 0-value, even though, we meant it to happen with a positive
- * value.
- *
- * To avoid this, have _end() be a NOP instruction, this ensures it will be
- * part of the condition block and does not escape.
- */
-#define instrumentation_end() ({                                \
-   asm volatile("%c0: nop\n\t"                                  \
-            ".pushsection .discard.instr_end\n\t"               \
-            ".long %c0b - .\n\t"                                \
-            ".popsection\n\t" : : "i" (__COUNTER__));           \
-})
-#endif /* CONFIG_DEBUG_ENTRY */
+/* Annotate a C jump table to allow objtool to follow the code flow */
+#define __annotate_jump_table __section(".rodata..c_jump_table")

 #else
 #define annotate_reachable()
 #define annotate_unreachable()
-#endif
-
-#ifndef instrumentation_begin
-#define instrumentation_begin() do { } while(0)
-#define instrumentation_end()   do { } while(0)
+#define __annotate_jump_table
 #endif

 #ifndef ASM_UNREACHABLE
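
__annotate_jump_table places a computed-goto dispatch table into .rodata..c_jump_table so objtool can recognize the indirect jump and keep following the control flow. A hedged sketch of the intended usage pattern, mirroring the BPF interpreter; the function and label names below are illustrative only:

    static int run(int opcode)
    {
        /* Label-address table placed in .rodata..c_jump_table. */
        static const void * const dispatch[] __annotate_jump_table = {
            [0] = &&op_add,
            [1] = &&op_ret,
        };
        int acc = 0;

        goto *dispatch[opcode];   /* objtool follows this via the annotation */
    op_add:
        acc += 1;
        /* fall through */
    op_ret:
        return acc;
    }
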
@@ -217,7 +169,7 @@
        extern typeof(sym) sym;                                 \
        static const unsigned long __kentry_##sym               \
        __used                                                  \
-       __attribute__((section("___kentry" "+" #sym ), used))   \
+       __attribute__((__section__("___kentry+" #sym)))         \
        = (unsigned long)&sym;
 #endif

@@ -241,116 +193,24 @@
 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
 #endif

-#include <uapi/linux/types.h>
-
-#define __READ_ONCE_SIZE                                        \
-({                                                              \
-   switch (size) {                                              \
-   case 1: *(__u8 *)res = *(volatile __u8 *)p; break;           \
-   case 2: *(__u16 *)res = *(volatile __u16 *)p; break;         \
-   case 4: *(__u32 *)res = *(volatile __u32 *)p; break;         \
-   case 8: *(__u64 *)res = *(volatile __u64 *)p; break;         \
-   default:                                                     \
-       barrier();                                               \
-       __builtin_memcpy((void *)res, (const void *)p, size);    \
-       barrier();                                               \
-   }                                                            \
-})
-
-static __always_inline
-void __read_once_size(const volatile void *p, void *res, int size)
-{
-   __READ_ONCE_SIZE;
-}
-
-#ifdef CONFIG_KASAN
-/*
- * We can't declare function 'inline' because __no_sanitize_address confilcts
- * with inlining. Attempt to inline it may cause a build failure.
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
- */
-# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
-#else
-# define __no_kasan_or_inline __always_inline
-#endif
-
-static __no_kasan_or_inline
-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
-{
-   __READ_ONCE_SIZE;
-}
-
-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
-{
-   switch (size) {
-   case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
-   case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
-   case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
-   case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
-   default:
-       barrier();
-       __builtin_memcpy((void *)p, (const void *)res, size);
-       barrier();
-   }
-}
-
-/*
- * Prevent the compiler from merging or refetching reads or writes. The
- * compiler is also forbidden from reordering successive instances of
- * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
- * particular ordering. One way to make the compiler aware of ordering is to
- * put the two invocations of READ_ONCE or WRITE_ONCE in different C
- * statements.
+/**
+ * data_race - mark an expression as containing intentional data races
  *
- * These two macros will also work on aggregate data types like structs or
- * unions. If the size of the accessed data type exceeds the word size of
- * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
- * fall back to memcpy(). There's at least two memcpy()s: one for the
- * __builtin_memcpy() and then one for the macro doing the copy of variable
- * - '__u' allocated on the stack.
+ * This data_race() macro is useful for situations in which data races
+ * should be forgiven. One example is diagnostic code that accesses
+ * shared variables but is not a part of the core synchronization design.
  *
- * Their two major use cases are: (1) Mediating communication between
- * process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
- * mutilate accesses that either do not require ordering or that interact
- * with an explicit memory barrier or atomic instruction that provides the
- * required ordering.
+ * This macro *does not* affect normal code generation, but is a hint
+ * to tooling that data races here are to be ignored.
  */
-#include <asm/barrier.h>
-#include <linux/kasan-checks.h>
-
-#define __READ_ONCE(x, check)                                   \
+#define data_race(expr)                                         \
 ({                                                              \
-   union { typeof(x) __val; char __c[1]; } __u;                 \
-   if (check)                                                   \
-       __read_once_size(&(x), __u.__c, sizeof(x));              \
-   else                                                         \
-       __read_once_size_nocheck(&(x), __u.__c, sizeof(x));      \
-   smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
-   __u.__val;                                                   \
-})
-#define READ_ONCE(x) __READ_ONCE(x, 1)
-
-/*
- * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
- * to hide memory access from KASAN.
- */
-#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
-
-static __no_kasan_or_inline
-unsigned long read_word_at_a_time(const void *addr)
-{
-   kasan_check_read(addr, 1);
-   return *(unsigned long *)addr;
-}
-
-#define WRITE_ONCE(x, val)                                      \
-({                                                              \
-   union { typeof(x) __val; char __c[1]; } __u =                \
-       { .__val = (__force typeof(x)) (val) };                  \
-   __write_once_size(&(x), __u.__c, sizeof(x));                 \
-   __u.__val;                                                   \
+   __unqual_scalar_typeof(({ expr; })) __v = ({                 \
+       __kcsan_disable_current();                               \
+       expr;                                                    \
+   });                                                          \
+   __kcsan_enable_current();                                    \
+   __v;                                                         \
 })

 #endif /* __KERNEL__ */
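
A hedged usage sketch for the new data_race() macro: a lockless diagnostic read of a counter that is written under a lock elsewhere. KCSAN is told the race is intentional, and the generated load is unchanged. The variable and helper names are hypothetical:

    static unsigned long stats_events;     /* updated under a lock elsewhere */

    static unsigned long stats_peek(void)
    {
        return data_race(stats_events);    /* racy read, deliberately unreported */
    }
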
@@ -362,8 +222,8 @@
  * visible to the compiler.
  */
 #define __ADDRESSABLE(sym)                                      \
-   static void * __attribute__((section(".discard.addressable"), used)) \
-       __PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
+   static void * __section(".discard.addressable") __used       \
+       __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;

 /**
  * offset_to_ptr - convert a relative memory offset to an absolute pointer
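
The __ADDRESSABLE() change also swaps the __LINE__ suffix for __UNIQUE_ID(), so two expansions that end up on the same source line (for example via nested macros) no longer collide wherever the compiler provides __COUNTER__. Typical use simply forces a reference to a symbol; the function name below is hypothetical:

    extern void my_hook(void);
    __ADDRESSABLE(my_hook)   /* emits a .discard.addressable pointer to my_hook */
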
@@ -376,50 +236,8 @@

 #endif /* __ASSEMBLY__ */

-#ifndef __optimize
-# define __optimize(level)
-#endif
-
-/* Compile time object size, -1 for unknown */
-#ifndef __compiletime_object_size
-# define __compiletime_object_size(obj) -1
-#endif
-#ifndef __compiletime_warning
-# define __compiletime_warning(message)
-#endif
-#ifndef __compiletime_error
-# define __compiletime_error(message)
-#endif
-
-#ifdef __OPTIMIZE__
-# define __compiletime_assert(condition, msg, prefix, suffix)   \
-   do {                                                         \
-       extern void prefix ## suffix(void) __compiletime_error(msg); \
-       if (!(condition))                                        \
-           prefix ## suffix();                                  \
-   } while (0)
-#else
-# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
-#endif
-
-#define _compiletime_assert(condition, msg, prefix, suffix) \
-   __compiletime_assert(condition, msg, prefix, suffix)
-
-/**
- * compiletime_assert - break build and emit msg if condition is false
- * @condition: a compile-time constant condition to check
- * @msg:       a message to emit if condition is false
- *
- * In tradition of POSIX assert, this macro will break the build if the
- * supplied condition is *false*, emitting the supplied error message if the
- * compiler has support to do so.
- */
-#define compiletime_assert(condition, msg) \
-   _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
-
-#define compiletime_assert_atomic_type(t)                       \
-   compiletime_assert(__native_word(t),                         \
-       "Need native word sized stores/loads for atomicity.")
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))

 /*
  * This is needed in functions which generate the stack canary, see
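
__must_be_array() is consumed by array-size macros (cf. ARRAY_SIZE() in include/linux/kernel.h): the BUILD_BUG_ON_ZERO() term evaluates to 0 for a true array and breaks the build when a pointer is passed instead. A sketch of that use, with a stand-in macro name:

    #define MY_ARRAY_SIZE(arr) \
        (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

    int tbl[16];
    /* MY_ARRAY_SIZE(tbl) == 16; MY_ARRAY_SIZE(&tbl[0]) fails to build. */
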
@@ -427,4 +245,6 @@
  */
 #define prevent_tail_call_optimization() mb()

+#include <asm/rwonce.h>
+
 #endif /* __LINUX_COMPILER_H */
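
The READ_ONCE()/WRITE_ONCE() family removed above now reaches users through <asm/rwonce.h> (backed by the generic include/asm-generic/rwonce.h), so existing callers of <linux/compiler.h> keep compiling unchanged. Illustrative use on a hypothetical shared flag:

    static int shared_flag;

    static void producer(void)
    {
        WRITE_ONCE(shared_flag, 1);     /* single, non-torn store */
    }

    static int consumer(void)
    {
        return READ_ONCE(shared_flag);  /* single, non-torn load */
    }
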