2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/include/linux/kernel.h
@@ -2,19 +2,22 @@
 #ifndef _LINUX_KERNEL_H
 #define _LINUX_KERNEL_H

+
 #include <stdarg.h>
+#include <linux/limits.h>
 #include <linux/linkage.h>
 #include <linux/stddef.h>
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/bitops.h>
 #include <linux/log2.h>
+#include <linux/minmax.h>
 #include <linux/typecheck.h>
 #include <linux/printk.h>
 #include <linux/build_bug.h>
 #include <asm/byteorder.h>
+#include <asm/div64.h>
 #include <uapi/linux/kernel.h>
-#include <linux/limits.h>

 #define STACK_MAGIC 0xdeadbeef

@@ -31,6 +34,7 @@
 #define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
 #define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
 #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#define PTR_ALIGN_DOWN(p, a) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (a)))
 #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)

 /* generic data direction definitions */
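The new PTR_ALIGN_DOWN mirrors PTR_ALIGN but rounds a pointer down to the previous boundary instead of up. A minimal usage sketch, not part of the patch; the helper name and the 4 KiB alignment are made up for illustration:

    #include <linux/kernel.h>

    /* Round an arbitrary pointer down to a hypothetical 4 KiB boundary. */
    static void *block_start_of(void *p)
    {
            return PTR_ALIGN_DOWN(p, 4096);
    }

As with ALIGN_DOWN, the alignment is assumed to be a power of two.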
@@ -76,14 +80,7 @@
  */
 #define round_down(x, y) ((x) & ~__round_mask(x, y))

-/**
- * FIELD_SIZEOF - get the size of a struct's field
- * @t: the target struct
- * @f: the target struct's field
- * Return: the size of @f in the struct definition without having a
- * declared instance of @t.
- */
-#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#define typeof_member(T, m) typeof(((T*)0)->m)

 #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP

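FIELD_SIZEOF is removed here, and typeof_member() takes its place for the related "talk about a member without an instance" use case, yielding the member's type rather than its size. An illustrative sketch, with a made-up struct and helper:

    #include <linux/kernel.h>
    #include <linux/types.h>

    struct sample {                 /* hypothetical struct for illustration */
            u32 flags;
            u64 counter;
    };

    /* Declare a local with the same type as sample.counter, whatever it is. */
    static u64 read_counter(const struct sample *s)
    {
            typeof_member(struct sample, counter) val = s->counter;

            return val;
    }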
@@ -106,12 +103,10 @@
  *
  * Rounds @x up to next multiple of @y. If @y will always be a power
  * of 2, consider using the faster round_up().
- *
- * The `const' here prevents gcc-3.3 from calling __divdi3
  */
 #define roundup(x, y) ( \
 { \
-        const typeof(y) __y = y; \
+        typeof(y) __y = y; \
         (((x) + (__y - 1)) / __y) * __y; \
 } \
 )
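The behaviour of roundup() is unchanged; only the old gcc-3.3 `const' workaround is dropped. For reference, a tiny worked example of what the macro computes (values are arbitrary):

    #include <linux/kernel.h>

    static unsigned long roundup_example(void)
    {
            /* roundup(10, 4) evaluates to ((10 + 3) / 4) * 4 == 12. */
            return roundup(10, 4);
    }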
@@ -176,19 +171,7 @@
 #define _RET_IP_ (unsigned long)__builtin_return_address(0)
 #define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })

-#ifdef CONFIG_LBDAF
-# include <asm/div64.h>
-# define sector_div(a, b) do_div(a, b)
-#else
-# define sector_div(n, b)( \
-{ \
-        int _res; \
-        _res = (n) % (b); \
-        (n) /= (b); \
-        _res; \
-} \
-)
-#endif
+#define sector_div(a, b) do_div(a, b)


 /**
@@ -204,7 +187,7 @@
  * lower_32_bits - return bits 0-31 of a number
  * @n: the number we're accessing
  */
-#define lower_32_bits(n) ((u32)(n))
+#define lower_32_bits(n) ((u32)((n) & 0xffffffff))

 struct completion;
 struct pt_regs;
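lower_32_bits() now masks explicitly instead of relying on the truncating cast alone; the result for integer arguments is the same. A quick sketch of the upper/lower split (the constant is arbitrary):

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void split_example(void)
    {
            u64 val = 0x1122334455667788ULL;
            u32 hi = upper_32_bits(val);    /* 0x11223344 */
            u32 lo = lower_32_bits(val);    /* 0x55667788 */

            (void)hi;
            (void)lo;
    }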
@@ -218,13 +201,17 @@
 #endif

 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- void ___might_sleep(const char *file, int line, int preempt_offset);
- void __might_sleep(const char *file, int line, int preempt_offset);
+extern void ___might_sleep(const char *file, int line, int preempt_offset);
+extern void __might_sleep(const char *file, int line, int preempt_offset);
+extern void __cant_sleep(const char *file, int line, int preempt_offset);
+
 /**
  * might_sleep - annotation for functions that can sleep
  *
  * this macro will print a stack trace if it is executed in an atomic
- * context (spinlock, irq-handler, ...).
+ * context (spinlock, irq-handler, ...). Additional sections where blocking is
+ * not allowed can be annotated with non_block_start() and non_block_end()
+ * pairs.
  *
  * This is a useful debugging help to be able to catch problems early and not
  * be bitten later when the calling function happens to sleep when it is not
@@ -232,21 +219,51 @@
  */
 # define might_sleep() \
         do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
-
-# define might_sleep_no_state_check() \
-        do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
+/**
+ * cant_sleep - annotation for functions that cannot sleep
+ *
+ * this macro will print a stack trace if it is executed with preemption enabled
+ */
+# define cant_sleep() \
+        do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
 # define sched_annotate_sleep() (current->task_state_change = 0)
+/**
+ * non_block_start - annotate the start of section where sleeping is prohibited
+ *
+ * This is on behalf of the oom reaper, specifically when it is calling the mmu
+ * notifiers. The problem is that if the notifier were to block on, for example,
+ * mutex_lock() and if the process which holds that mutex were to perform a
+ * sleeping memory allocation, the oom reaper is now blocked on completion of
+ * that memory allocation. Other blocking calls like wait_event() pose similar
+ * issues.
+ */
+# define non_block_start() (current->non_block_count++)
+/**
+ * non_block_end - annotate the end of section where sleeping is prohibited
+ *
+ * Closes a section opened by non_block_start().
+ */
+# define non_block_end() WARN_ON(current->non_block_count-- == 0)
 #else
  static inline void ___might_sleep(const char *file, int line,
                                    int preempt_offset) { }
  static inline void __might_sleep(const char *file, int line,
                                    int preempt_offset) { }
 # define might_sleep() do { might_resched(); } while (0)
-# define might_sleep_no_state_check() do { might_resched(); } while (0)
+# define cant_sleep() do { } while (0)
 # define sched_annotate_sleep() do { } while (0)
+# define non_block_start() do { } while (0)
+# define non_block_end() do { } while (0)
 #endif

 #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
+
+#ifndef CONFIG_PREEMPT_RT
+# define cant_migrate() cant_sleep()
+#else
+ /* Placeholder for now */
+# define cant_migrate() do { } while (0)
+#endif

 /**
  * abs - return absolute value of an argument
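A hedged sketch of how the new annotations are meant to be used under CONFIG_DEBUG_ATOMIC_SLEEP. The functions below are made up; the annotations themselves never block, they only arm the debug checks:

    #include <linux/kernel.h>

    /* Hypothetical callback that must never block (e.g. run from the oom reaper). */
    static void example_reaper_step(void)
    {
            non_block_start();
            /*
             * Anything in here that reaches __might_sleep() -- mutex_lock(),
             * wait_event(), a GFP_KERNEL allocation, ... -- should now warn.
             */
            non_block_end();
    }

    /* Hypothetical fast path that relies on preemption being disabled. */
    static void example_fast_path(void)
    {
            cant_sleep();   /* warns if executed with preemption enabled */
    }

cant_migrate() currently just maps to cant_sleep() on non-PREEMPT_RT kernels, with the RT definition left as a placeholder.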
@@ -299,24 +316,15 @@
 #endif

 extern struct atomic_notifier_head panic_notifier_list;
-extern void (*vendor_panic_cb)(u64 sp);
 extern long (*panic_blink)(int state);
 __printf(1, 2)
 void panic(const char *fmt, ...) __noreturn __cold;
 void nmi_panic(struct pt_regs *regs, const char *msg);
 extern void oops_enter(void);
 extern void oops_exit(void);
-void print_oops_end_marker(void);
-extern int oops_may_print(void);
+extern bool oops_may_print(void);
 void do_exit(long error_code) __noreturn;
 void complete_and_exit(struct completion *, long) __noreturn;
-
-#ifdef CONFIG_ARCH_HAS_REFCOUNT
-void refcount_error_report(struct pt_regs *regs, const char *err);
-#else
-static inline void refcount_error_report(struct pt_regs *regs, const char *err)
-{ }
-#endif

 /* Internal, do not use. */
 int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
@@ -338,8 +346,7 @@
  * @res: Where to write the result of the conversion on success.
  *
  * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtoul(). Return code must be checked.
  */
 static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
 {
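A small sketch of the checked-return usage the reworded comment asks for; the parsing helper is made up:

    #include <linux/kernel.h>

    static int parse_threshold(const char *buf, unsigned long *out)
    {
            int ret;

            /* kstrtoul() rejects overflow and trailing garbage; check the result. */
            ret = kstrtoul(buf, 10, out);
            if (ret)
                    return ret;     /* -ERANGE or -EINVAL */

            return 0;
    }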
@@ -367,8 +374,7 @@
  * @res: Where to write the result of the conversion on success.
  *
  * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtol(). Return code must be checked.
  */
 static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
 {
@@ -444,7 +450,18 @@
         return kstrtoint_from_user(s, count, base, res);
 }

-/* Obsolete, do not use. Use kstrto<foo> instead */
+/*
+ * Use kstrto<foo> instead.
+ *
+ * NOTE: simple_strto<foo> does not check for the range overflow and,
+ * depending on the input, may give interesting results.
+ *
+ * Use these functions if and only if you cannot use kstrto<foo>, because
+ * the conversion ends on the first non-digit character, which may be far
+ * beyond the supported range. It might be useful to parse the strings like
+ * 10x50 or 12:21 without altering original string or temporary buffer in use.
+ * Keep in mind above caveat.
+ */

 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
 extern long simple_strtol(const char *,char **,unsigned int);
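The expanded comment spells out the only legitimate use of simple_strto<foo>: parsing a number that is followed by more text. A sketch of the "10x50"-style case it mentions; the helper name is invented and, as the comment warns, there is no overflow checking here:

    #include <linux/kernel.h>
    #include <linux/errno.h>

    /* Parse a "<width>x<height>" string such as "10x50". */
    static int parse_geometry(const char *s, unsigned long *w, unsigned long *h)
    {
            char *end;

            *w = simple_strtoul(s, &end, 10);
            if (*end != 'x')
                    return -EINVAL;

            *h = simple_strtoul(end + 1, &end, 10);
            if (*end != '\0')
                    return -EINVAL;

            return 0;
    }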
@@ -491,6 +508,7 @@
 extern int kernel_text_address(unsigned long addr);
 extern int func_ptr_is_kernel_text(void *ptr);

+u64 int_pow(u64 base, unsigned int exp);
 unsigned long int_sqrt(unsigned long);

 #if BITS_PER_LONG < 64
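int_pow() is plain 64-bit integer exponentiation, the counterpart to the existing int_sqrt(). A one-liner sketch (the wrapper is made up):

    #include <linux/kernel.h>
    #include <linux/types.h>

    static u64 power_of_ten(unsigned int exp)
    {
            /* int_pow(10, 3) == 1000 */
            return int_pow(10, exp);
    }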
@@ -502,13 +520,21 @@
 }
 #endif

+#ifdef CONFIG_SMP
+extern unsigned int sysctl_oops_all_cpu_backtrace;
+#else
+#define sysctl_oops_all_cpu_backtrace 0
+#endif /* CONFIG_SMP */
+
 extern void bust_spinlocks(int yes);
-extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
 extern int panic_timeout;
+extern unsigned long panic_print;
 extern int panic_on_oops;
 extern int panic_on_unrecovered_nmi;
 extern int panic_on_io_nmi;
 extern int panic_on_warn;
+extern unsigned long panic_on_taint;
+extern bool panic_on_taint_nousertaint;
 extern int sysctl_panic_on_rcu_stall;
 extern int sysctl_panic_on_stackoverflow;

@@ -577,6 +603,7 @@
 #define TAINT_AUX 16
 #define TAINT_RANDSTRUCT 17
 #define TAINT_FLAGS_COUNT 18
+#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1)

 struct taint_flag {
         char c_true; /* character printed when tainted */
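TAINT_FLAGS_MAX is the mask covering every defined taint bit; with TAINT_FLAGS_COUNT at 18 it works out to 0x3ffff. A one-line validity check sketch (the helper name is made up), of the kind panic_on_taint parsing would need:

    #include <linux/kernel.h>

    /* Reject a user-supplied taint mask that sets undefined bits. */
    static bool taint_mask_valid(unsigned long mask)
    {
            return (mask & ~TAINT_FLAGS_MAX) == 0;
    }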
@@ -608,7 +635,11 @@
         return buf;
 }

+#ifdef __GENKSYMS__
 extern int hex_to_bin(char ch);
+#else
+extern int hex_to_bin(unsigned char ch);
+#endif
 extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
 extern char *bin2hex(char *dst, const void *src, size_t count);

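The __GENKSYMS__ split appears intended to keep the genksyms symbol CRC (and thus the module ABI) unchanged while the real prototype switches to unsigned char. Callers are unaffected either way; hex_to_bin() still returns the nibble value or a negative value on bad input. A small usage sketch with made-up names:

    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <linux/errno.h>

    /* Convert a two-character hex byte such as "4f". */
    static int parse_hex_byte(const char *s, u8 *out)
    {
            int hi = hex_to_bin(s[0]);
            int lo = hex_to_bin(s[1]);

            if (hi < 0 || lo < 0)
                    return -EINVAL;

            *out = (hi << 4) | lo;
            return 0;
    }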
@@ -702,7 +733,7 @@
 #define do_trace_printk(fmt, args...) \
 do { \
         static const char *trace_printk_fmt __used \
-                __attribute__((section("__trace_printk_fmt"))) = \
+                __section("__trace_printk_fmt") = \
                 __builtin_constant_p(fmt) ? fmt : NULL; \
 \
         __trace_printk_check_format(fmt, ##args); \
@@ -746,7 +777,7 @@

 #define trace_puts(str) ({ \
         static const char *trace_printk_fmt __used \
-                __attribute__((section("__trace_printk_fmt"))) = \
+                __section("__trace_printk_fmt") = \
                 __builtin_constant_p(str) ? str : NULL; \
 \
         if (__builtin_constant_p(str)) \
@@ -768,7 +799,7 @@
 do { \
         if (__builtin_constant_p(fmt)) { \
                 static const char *trace_printk_fmt __used \
-                        __attribute__((section("__trace_printk_fmt"))) = \
+                        __section("__trace_printk_fmt") = \
                         __builtin_constant_p(fmt) ? fmt : NULL; \
 \
                 __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
806837 }
807838 static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
808839 #endif /* CONFIG_TRACING */
809
-
810
-/*
811
- * min()/max()/clamp() macros must accomplish three things:
812
- *
813
- * - avoid multiple evaluations of the arguments (so side-effects like
814
- * "x++" happen only once) when non-constant.
815
- * - perform strict type-checking (to generate warnings instead of
816
- * nasty runtime surprises). See the "unnecessary" pointer comparison
817
- * in __typecheck().
818
- * - retain result as a constant expressions when called with only
819
- * constant expressions (to avoid tripping VLA warnings in stack
820
- * allocation usage).
821
- */
822
-#define __typecheck(x, y) \
823
- (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
824
-
825
-/*
826
- * This returns a constant expression while determining if an argument is
827
- * a constant expression, most importantly without evaluating the argument.
828
- * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
829
- */
830
-#define __is_constexpr(x) \
831
- (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
832
-
833
-#define __no_side_effects(x, y) \
834
- (__is_constexpr(x) && __is_constexpr(y))
835
-
836
-#define __safe_cmp(x, y) \
837
- (__typecheck(x, y) && __no_side_effects(x, y))
838
-
839
-#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))
840
-
841
-#define __cmp_once(x, y, unique_x, unique_y, op) ({ \
842
- typeof(x) unique_x = (x); \
843
- typeof(y) unique_y = (y); \
844
- __cmp(unique_x, unique_y, op); })
845
-
846
-#define __careful_cmp(x, y, op) \
847
- __builtin_choose_expr(__safe_cmp(x, y), \
848
- __cmp(x, y, op), \
849
- __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
850
-
851
-/**
852
- * min - return minimum of two values of the same or compatible types
853
- * @x: first value
854
- * @y: second value
855
- */
856
-#define min(x, y) __careful_cmp(x, y, <)
857
-
858
-/**
859
- * max - return maximum of two values of the same or compatible types
860
- * @x: first value
861
- * @y: second value
862
- */
863
-#define max(x, y) __careful_cmp(x, y, >)
864
-
865
-/**
866
- * min3 - return minimum of three values
867
- * @x: first value
868
- * @y: second value
869
- * @z: third value
870
- */
871
-#define min3(x, y, z) min((typeof(x))min(x, y), z)
872
-
873
-/**
874
- * max3 - return maximum of three values
875
- * @x: first value
876
- * @y: second value
877
- * @z: third value
878
- */
879
-#define max3(x, y, z) max((typeof(x))max(x, y), z)
880
-
881
-/**
882
- * min_not_zero - return the minimum that is _not_ zero, unless both are zero
883
- * @x: value1
884
- * @y: value2
885
- */
886
-#define min_not_zero(x, y) ({ \
887
- typeof(x) __x = (x); \
888
- typeof(y) __y = (y); \
889
- __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
890
-
891
-/**
892
- * clamp - return a value clamped to a given range with strict typechecking
893
- * @val: current value
894
- * @lo: lowest allowable value
895
- * @hi: highest allowable value
896
- *
897
- * This macro does strict typechecking of @lo/@hi to make sure they are of the
898
- * same type as @val. See the unnecessary pointer comparisons.
899
- */
900
-#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
901
-
902
-/*
903
- * ..and if you can't take the strict
904
- * types, you can specify one yourself.
905
- *
906
- * Or not use min/max/clamp at all, of course.
907
- */
908
-
909
-/**
910
- * min_t - return minimum of two values, using the specified type
911
- * @type: data type to use
912
- * @x: first value
913
- * @y: second value
914
- */
915
-#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)
916
-
917
-/**
918
- * max_t - return maximum of two values, using the specified type
919
- * @type: data type to use
920
- * @x: first value
921
- * @y: second value
922
- */
923
-#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)
924
-
925
-/**
926
- * clamp_t - return a value clamped to a given range using a given type
927
- * @type: the type of variable to use
928
- * @val: current value
929
- * @lo: minimum allowable value
930
- * @hi: maximum allowable value
931
- *
932
- * This macro does no typechecking and uses temporary variables of type
933
- * @type to make all the comparisons.
934
- */
935
-#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
936
-
937
-/**
938
- * clamp_val - return a value clamped to a given range using val's type
939
- * @val: current value
940
- * @lo: minimum allowable value
941
- * @hi: maximum allowable value
942
- *
943
- * This macro does no typechecking and uses temporary variables of whatever
944
- * type the input argument @val is. This is useful when @val is an unsigned
945
- * type and @lo and @hi are literals that will otherwise be assigned a signed
946
- * integer type.
947
- */
948
-#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
949
-
950
-
951
-/**
952
- * swap - swap values of @a and @b
953
- * @a: first value
954
- * @b: second value
955
- */
956
-#define swap(a, b) \
957
- do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
958840
959841 /* This counts to 12. Any more, it will return 13th argument. */
960842 #define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
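The min()/max()/clamp()/swap() family is not dropped from the kernel: this hunk removes the definitions from kernel.h, and the new #include <linux/minmax.h> added at the top of the file supplies them instead, so existing callers keep compiling unchanged. A reminder sketch of the properties the __careful_cmp()/__cmp_once() machinery documented above is protecting, with a made-up helper; the arguments are evaluated once and must have matching types:

    #include <linux/minmax.h>

    static int clamp_percent(int v)
    {
            /*
             * clamp(v, 0, 100): @v, @lo and @hi are all int, so the strict
             * typecheck passes, and @v is expanded into a temporary so any
             * side effects in the argument would happen exactly once.
             */
            return clamp(v, 0, 100);
    }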