2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
--- a/kernel/include/linux/kernel.h
+++ b/kernel/include/linux/kernel.h
@@ -2,19 +2,23 @@
 #ifndef _LINUX_KERNEL_H
 #define _LINUX_KERNEL_H
 
+
 #include <stdarg.h>
+#include <linux/limits.h>
 #include <linux/linkage.h>
 #include <linux/stddef.h>
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/bitops.h>
+#include <linux/kstrtox.h>
 #include <linux/log2.h>
+#include <linux/minmax.h>
 #include <linux/typecheck.h>
 #include <linux/printk.h>
 #include <linux/build_bug.h>
 #include <asm/byteorder.h>
+#include <asm/div64.h>
 #include <uapi/linux/kernel.h>
-#include <linux/limits.h>
 
 #define STACK_MAGIC	0xdeadbeef
 
@@ -31,6 +35,7 @@
 #define ALIGN_DOWN(x, a)	__ALIGN_KERNEL((x) - ((a) - 1), (a))
 #define __ALIGN_MASK(x, mask)	__ALIGN_KERNEL_MASK((x), (mask))
 #define PTR_ALIGN(p, a)		((typeof(p))ALIGN((unsigned long)(p), (a)))
+#define PTR_ALIGN_DOWN(p, a)	((typeof(p))ALIGN_DOWN((unsigned long)(p), (a)))
 #define IS_ALIGNED(x, a)	(((x) & ((typeof(x))(a) - 1)) == 0)
 
 /* generic data direction definitions */
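
Note: PTR_ALIGN_DOWN() complements PTR_ALIGN() by rounding a pointer down to the previous @a boundary. A rough illustration, not part of the patch (the helper name and the 64-byte block size are made up):

#include <linux/kernel.h>

/* Round an arbitrary pointer down to the start of its 64-byte block,
 * e.g. a pointer at offset 0x1234 yields the block base 0x1200. */
static void *example_block_start(void *p)
{
	return PTR_ALIGN_DOWN(p, 64);
}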
@@ -42,6 +47,8 @@
  * @arr: array to be sized
  */
 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+
+#define PTR_IF(cond, ptr)	((cond) ? (ptr) : NULL)
 
 #define u64_to_user_ptr(x) ( \
 { \
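
PTR_IF() yields @ptr when @cond is true and NULL otherwise, which reads better than an open-coded ternary for optional resources. A hypothetical sketch:

#include <linux/kernel.h>

struct device;

/* Hand out the device pointer only when DMA is actually in use;
 * callers then simply check for NULL. */
static struct device *example_dma_dev(struct device *dev, bool use_dma)
{
	return PTR_IF(use_dma, dev);
}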
@@ -76,14 +83,7 @@
  */
 #define round_down(x, y) ((x) & ~__round_mask(x, y))
 
-/**
- * FIELD_SIZEOF - get the size of a struct's field
- * @t: the target struct
- * @f: the target struct's field
- * Return: the size of @f in the struct definition without having a
- * declared instance of @t.
- */
-#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#define typeof_member(T, m)	typeof(((T*)0)->m)
 
 #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
 
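
typeof_member() gives the declared type of a struct member without needing an instance; former FIELD_SIZEOF() users can express the same thing with sizeof() on top of it. An illustrative sketch (the struct is invented):

#include <linux/kernel.h>

struct example_pkt {
	u32 seq;
	u16 len;
};

/* Equivalent to the removed FIELD_SIZEOF(struct example_pkt, len). */
static size_t example_len_size(void)
{
	return sizeof(typeof_member(struct example_pkt, len));
}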
@@ -106,12 +106,10 @@
  *
  * Rounds @x up to next multiple of @y. If @y will always be a power
  * of 2, consider using the faster round_up().
- *
- * The `const' here prevents gcc-3.3 from calling __divdi3
  */
 #define roundup(x, y) (					\
 {							\
-	const typeof(y) __y = y;			\
+	typeof(y) __y = y;				\
 	(((x) + (__y - 1)) / __y) * __y;		\
 }							\
 )
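
Dropping the const only changes how the temporary is declared; roundup() still evaluates @y once and returns the next multiple. For reference, a hypothetical caller:

#include <linux/kernel.h>

/* Round a byte count up to whole 3-byte RGB pixels, e.g. 7 -> 9. */
static unsigned int example_round_to_pixels(unsigned int nbytes)
{
	return roundup(nbytes, 3);
}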
@@ -176,19 +174,7 @@
 #define _RET_IP_		(unsigned long)__builtin_return_address(0)
 #define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })
 
-#ifdef CONFIG_LBDAF
-# include <asm/div64.h>
-# define sector_div(a, b) do_div(a, b)
-#else
-# define sector_div(n, b)( \
-{ \
-	int _res; \
-	_res = (n) % (b); \
-	(n) /= (b); \
-	_res; \
-} \
-)
-#endif
+#define sector_div(a, b) do_div(a, b)
 
 /**
  * upper_32_bits - return bits 32-63 of a number
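
With CONFIG_LBDAF gone, sector_div() always maps to do_div(): it divides a u64 in place and returns the 32-bit remainder. A hedged example of the calling convention (names invented):

#include <linux/kernel.h>
#include <asm/div64.h>

/* Split a byte offset into a 512-byte sector count (left in *sector)
 * and the remaining offset inside that sector (returned). */
static u32 example_split_offset(u64 *sector)
{
	return sector_div(*sector, 512);
}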
@@ -204,7 +190,7 @@
  * lower_32_bits - return bits 0-31 of a number
  * @n: the number we're accessing
  */
-#define lower_32_bits(n) ((u32)(n))
+#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
 
 struct completion;
 struct pt_regs;
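
The added mask makes the truncation explicit; callers see the same u32 result. Typical use splits a 64-bit value into two 32-bit halves (illustrative helper, not from the patch):

#include <linux/kernel.h>

/* Program a 64-bit DMA address into a low/high register pair. */
static void example_write_dma_addr(u64 addr, u32 *reg_lo, u32 *reg_hi)
{
	*reg_lo = lower_32_bits(addr);	/* bits 0-31 */
	*reg_hi = upper_32_bits(addr);	/* bits 32-63 */
}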
@@ -218,13 +204,17 @@
 #endif
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- void ___might_sleep(const char *file, int line, int preempt_offset);
- void __might_sleep(const char *file, int line, int preempt_offset);
+extern void ___might_sleep(const char *file, int line, int preempt_offset);
+extern void __might_sleep(const char *file, int line, int preempt_offset);
+extern void __cant_sleep(const char *file, int line, int preempt_offset);
+
 /**
  * might_sleep - annotation for functions that can sleep
  *
  * this macro will print a stack trace if it is executed in an atomic
- * context (spinlock, irq-handler, ...).
+ * context (spinlock, irq-handler, ...). Additional sections where blocking is
+ * not allowed can be annotated with non_block_start() and non_block_end()
+ * pairs.
  *
  * This is a useful debugging help to be able to catch problems early and not
  * be bitten later when the calling function happens to sleep when it is not
@@ -232,17 +222,51 @@
  */
 # define might_sleep() \
 	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
+/**
+ * cant_sleep - annotation for functions that cannot sleep
+ *
+ * this macro will print a stack trace if it is executed with preemption enabled
+ */
+# define cant_sleep() \
+	do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
 # define sched_annotate_sleep()	(current->task_state_change = 0)
+/**
+ * non_block_start - annotate the start of section where sleeping is prohibited
+ *
+ * This is on behalf of the oom reaper, specifically when it is calling the mmu
+ * notifiers. The problem is that if the notifier were to block on, for example,
+ * mutex_lock() and if the process which holds that mutex were to perform a
+ * sleeping memory allocation, the oom reaper is now blocked on completion of
+ * that memory allocation. Other blocking calls like wait_event() pose similar
+ * issues.
+ */
+# define non_block_start() (current->non_block_count++)
+/**
+ * non_block_end - annotate the end of section where sleeping is prohibited
+ *
+ * Closes a section opened by non_block_start().
+ */
+# define non_block_end() WARN_ON(current->non_block_count-- == 0)
 #else
 static inline void ___might_sleep(const char *file, int line,
 				   int preempt_offset) { }
 static inline void __might_sleep(const char *file, int line,
 				   int preempt_offset) { }
 # define might_sleep() do { might_resched(); } while (0)
+# define cant_sleep() do { } while (0)
 # define sched_annotate_sleep() do { } while (0)
+# define non_block_start() do { } while (0)
+# define non_block_end() do { } while (0)
 #endif
 
 #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
+
+#ifndef CONFIG_PREEMPT_RT
+# define cant_migrate()		cant_sleep()
+#else
+  /* Placeholder for now */
+# define cant_migrate()		do { } while (0)
+#endif
 
 /**
  * abs - return absolute value of an argument
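
cant_sleep() is the inverse annotation of might_sleep(), and non_block_start()/non_block_end() bracket a region where any sleep should trip the CONFIG_DEBUG_ATOMIC_SLEEP machinery. A hedged sketch of intended use (functions are hypothetical):

#include <linux/kernel.h>

/* Fast path that must run non-preemptible; a sleep here would print
 * a stack trace under CONFIG_DEBUG_ATOMIC_SLEEP. */
static void example_fastpath(void)
{
	cant_sleep();
	/* ... lockless bookkeeping only, no mutexes or allocations ... */
}

/* Region where blocking is forbidden even though preemption may stay
 * enabled, as in the oom-reaper/mmu-notifier case described above. */
static void example_no_block_region(void)
{
	non_block_start();
	/* ... must not call mutex_lock(), wait_event(), or make
	 *     GFP_KERNEL allocations ... */
	non_block_end();
}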
@@ -295,157 +319,16 @@
 #endif
 
 extern struct atomic_notifier_head panic_notifier_list;
-extern void (*vendor_panic_cb)(u64 sp);
 extern long (*panic_blink)(int state);
 __printf(1, 2)
 void panic(const char *fmt, ...) __noreturn __cold;
 void nmi_panic(struct pt_regs *regs, const char *msg);
+void check_panic_on_warn(const char *origin);
 extern void oops_enter(void);
 extern void oops_exit(void);
-void print_oops_end_marker(void);
-extern int oops_may_print(void);
+extern bool oops_may_print(void);
 void do_exit(long error_code) __noreturn;
 void complete_and_exit(struct completion *, long) __noreturn;
-
-#ifdef CONFIG_ARCH_HAS_REFCOUNT
-void refcount_error_report(struct pt_regs *regs, const char *err);
-#else
-static inline void refcount_error_report(struct pt_regs *regs, const char *err)
-{ }
-#endif
-
-/* Internal, do not use. */
-int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
-int __must_check _kstrtol(const char *s, unsigned int base, long *res);
-
-int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
-int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
-
-/**
- * kstrtoul - convert a string to an unsigned long
- * @s: The start of the string. The string must be null-terminated, and may also
- * include a single newline before its terminating null. The first character
- * may also be a plus sign, but not a minus sign.
- * @base: The number base to use. The maximum supported base is 16. If base is
- * given as 0, then the base of the string is automatically detected with the
- * conventional semantics - If it begins with 0x the number will be parsed as a
- * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
- * parsed as an octal number. Otherwise it will be parsed as a decimal.
- * @res: Where to write the result of the conversion on success.
- *
- * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
-*/
-static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
-{
-	/*
-	 * We want to shortcut function call, but
-	 * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
-	 */
-	if (sizeof(unsigned long) == sizeof(unsigned long long) &&
-	    __alignof__(unsigned long) == __alignof__(unsigned long long))
-		return kstrtoull(s, base, (unsigned long long *)res);
-	else
-		return _kstrtoul(s, base, res);
-}
-
-/**
- * kstrtol - convert a string to a long
- * @s: The start of the string. The string must be null-terminated, and may also
- * include a single newline before its terminating null. The first character
- * may also be a plus sign or a minus sign.
- * @base: The number base to use. The maximum supported base is 16. If base is
- * given as 0, then the base of the string is automatically detected with the
- * conventional semantics - If it begins with 0x the number will be parsed as a
- * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
- * parsed as an octal number. Otherwise it will be parsed as a decimal.
- * @res: Where to write the result of the conversion on success.
- *
- * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
- */
-static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
-{
-	/*
-	 * We want to shortcut function call, but
-	 * __builtin_types_compatible_p(long, long long) = 0.
-	 */
-	if (sizeof(long) == sizeof(long long) &&
-	    __alignof__(long) == __alignof__(long long))
-		return kstrtoll(s, base, (long long *)res);
-	else
-		return _kstrtol(s, base, res);
-}
-
-int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
-int __must_check kstrtoint(const char *s, unsigned int base, int *res);
-
-static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
-{
-	return kstrtoull(s, base, res);
-}
-
-static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
-{
-	return kstrtoll(s, base, res);
-}
-
-static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
-{
-	return kstrtouint(s, base, res);
-}
-
-static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
-{
-	return kstrtoint(s, base, res);
-}
-
-int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
-int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
-int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
-int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
-int __must_check kstrtobool(const char *s, bool *res);
-
-int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
-int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
-int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
-int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
-int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
-int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
-int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
-int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
-int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
-int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
-int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
-
-static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
-{
-	return kstrtoull_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
-{
-	return kstrtoll_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
-{
-	return kstrtouint_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
-{
-	return kstrtoint_from_user(s, count, base, res);
-}
-
-/* Obsolete, do not use. Use kstrto<foo> instead */
-
-extern unsigned long simple_strtoul(const char *,char **,unsigned int);
-extern long simple_strtol(const char *,char **,unsigned int);
-extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
-extern long long simple_strtoll(const char *,char **,unsigned int);
 
 extern int num_to_str(char *buf, int size,
 		      unsigned long long num, unsigned int width);
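
The kstrto*() helpers are not deleted; their declarations move to <linux/kstrtox.h>, which the first hunk adds to the include list, so existing callers compile unchanged. For example (hypothetical sysfs-style parser):

#include <linux/kernel.h>	/* still provides kstrtoul() via kstrtox.h */

/* Parse a decimal or 0x-prefixed value from userspace text; returns
 * 0 on success, -EINVAL or -ERANGE on bad input, exactly as before. */
static int example_parse_threshold(const char *buf, unsigned long *out)
{
	return kstrtoul(buf, 0, out);
}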
@@ -487,6 +370,7 @@
 extern int kernel_text_address(unsigned long addr);
 extern int func_ptr_is_kernel_text(void *ptr);
 
+u64 int_pow(u64 base, unsigned int exp);
 unsigned long int_sqrt(unsigned long);
 
 #if BITS_PER_LONG < 64
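
int_pow() computes base**exp in u64 arithmetic, the integer counterpart of int_sqrt(). A small hypothetical use:

#include <linux/kernel.h>

/* Scale a raw reading by 10^places, e.g. for fixed-point conversion. */
static u64 example_scale_decimal(u64 val, unsigned int places)
{
	return val * int_pow(10, places);
}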
@@ -499,12 +383,14 @@
 #endif
 
 extern void bust_spinlocks(int yes);
-extern int oops_in_progress;	/* If set, an oops, panic(), BUG() or die() is in progress */
 extern int panic_timeout;
+extern unsigned long panic_print;
 extern int panic_on_oops;
 extern int panic_on_unrecovered_nmi;
 extern int panic_on_io_nmi;
 extern int panic_on_warn;
+extern unsigned long panic_on_taint;
+extern bool panic_on_taint_nousertaint;
 extern int sysctl_panic_on_rcu_stall;
 extern int sysctl_panic_on_stackoverflow;
 
@@ -573,6 +459,7 @@
 #define TAINT_AUX			16
 #define TAINT_RANDSTRUCT		17
 #define TAINT_FLAGS_COUNT		18
+#define TAINT_FLAGS_MAX			((1UL << TAINT_FLAGS_COUNT) - 1)
 
 struct taint_flag {
 	char c_true;	/* character printed when tainted */
@@ -604,7 +491,11 @@
 	return buf;
 }
 
+#ifdef __GENKSYMS__
 extern int hex_to_bin(char ch);
+#else
+extern int hex_to_bin(unsigned char ch);
+#endif
 extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
 extern char *bin2hex(char *dst, const void *src, size_t count);
 
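
hex_to_bin() still returns 0-15 for a hex digit and -1 otherwise; the argument becomes unsigned char so high-bit input is not sign-extended, while the __GENKSYMS__ branch keeps the old prototype for symbol-CRC purposes. Usage is unchanged (illustrative helper, not from the patch):

#include <linux/kernel.h>

/* Combine two ASCII hex digits into one byte; returns -1 on bad input. */
static int example_parse_hex_byte(const char *s)
{
	int hi = hex_to_bin(s[0]);
	int lo = hex_to_bin(s[1]);

	if (hi < 0 || lo < 0)
		return -1;
	return (hi << 4) | lo;
}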
@@ -698,7 +589,7 @@
 #define do_trace_printk(fmt, args...)				\
 do {								\
 	static const char *trace_printk_fmt __used		\
-		__attribute__((section("__trace_printk_fmt"))) =	\
+		__section("__trace_printk_fmt") =		\
 		__builtin_constant_p(fmt) ? fmt : NULL;		\
 								\
 	__trace_printk_check_format(fmt, ##args);		\
@@ -742,7 +633,7 @@
 
 #define trace_puts(str) ({					\
 	static const char *trace_printk_fmt __used		\
-		__attribute__((section("__trace_printk_fmt"))) =	\
+		__section("__trace_printk_fmt") =		\
 		__builtin_constant_p(str) ? str : NULL;		\
 								\
 	if (__builtin_constant_p(str))				\
@@ -764,7 +655,7 @@
 do {								\
 	if (__builtin_constant_p(fmt)) {			\
 		static const char *trace_printk_fmt __used	\
-			__attribute__((section("__trace_printk_fmt"))) = \
+			__section("__trace_printk_fmt") =	\
 			__builtin_constant_p(fmt) ? fmt : NULL; \
 								\
 		__ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
@@ -802,155 +693,6 @@
 }
 static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 #endif /* CONFIG_TRACING */
-
-/*
- * min()/max()/clamp() macros must accomplish three things:
- *
- * - avoid multiple evaluations of the arguments (so side-effects like
- *   "x++" happen only once) when non-constant.
- * - perform strict type-checking (to generate warnings instead of
- *   nasty runtime surprises). See the "unnecessary" pointer comparison
- *   in __typecheck().
- * - retain result as a constant expressions when called with only
- *   constant expressions (to avoid tripping VLA warnings in stack
- *   allocation usage).
- */
-#define __typecheck(x, y) \
-	(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
-
-/*
- * This returns a constant expression while determining if an argument is
- * a constant expression, most importantly without evaluating the argument.
- * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
- */
-#define __is_constexpr(x) \
-	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
-
-#define __no_side_effects(x, y) \
-		(__is_constexpr(x) && __is_constexpr(y))
-
-#define __safe_cmp(x, y) \
-		(__typecheck(x, y) && __no_side_effects(x, y))
-
-#define __cmp(x, y, op)	((x) op (y) ? (x) : (y))
-
-#define __cmp_once(x, y, unique_x, unique_y, op) ({	\
-		typeof(x) unique_x = (x);		\
-		typeof(y) unique_y = (y);		\
-		__cmp(unique_x, unique_y, op); })
-
-#define __careful_cmp(x, y, op) \
-	__builtin_choose_expr(__safe_cmp(x, y), \
-		__cmp(x, y, op), \
-		__cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
-
-/**
- * min - return minimum of two values of the same or compatible types
- * @x: first value
- * @y: second value
- */
-#define min(x, y)	__careful_cmp(x, y, <)
-
-/**
- * max - return maximum of two values of the same or compatible types
- * @x: first value
- * @y: second value
- */
-#define max(x, y)	__careful_cmp(x, y, >)
-
-/**
- * min3 - return minimum of three values
- * @x: first value
- * @y: second value
- * @z: third value
- */
-#define min3(x, y, z) min((typeof(x))min(x, y), z)
-
-/**
- * max3 - return maximum of three values
- * @x: first value
- * @y: second value
- * @z: third value
- */
-#define max3(x, y, z) max((typeof(x))max(x, y), z)
-
-/**
- * min_not_zero - return the minimum that is _not_ zero, unless both are zero
- * @x: value1
- * @y: value2
- */
-#define min_not_zero(x, y) ({			\
-	typeof(x) __x = (x);			\
-	typeof(y) __y = (y);			\
-	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
-
-/**
- * clamp - return a value clamped to a given range with strict typechecking
- * @val: current value
- * @lo: lowest allowable value
- * @hi: highest allowable value
- *
- * This macro does strict typechecking of @lo/@hi to make sure they are of the
- * same type as @val. See the unnecessary pointer comparisons.
- */
-#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
-
-/*
- * ..and if you can't take the strict
- * types, you can specify one yourself.
- *
- * Or not use min/max/clamp at all, of course.
- */
-
-/**
- * min_t - return minimum of two values, using the specified type
- * @type: data type to use
- * @x: first value
- * @y: second value
- */
-#define min_t(type, x, y)	__careful_cmp((type)(x), (type)(y), <)
-
-/**
- * max_t - return maximum of two values, using the specified type
- * @type: data type to use
- * @x: first value
- * @y: second value
- */
-#define max_t(type, x, y)	__careful_cmp((type)(x), (type)(y), >)
-
-/**
- * clamp_t - return a value clamped to a given range using a given type
- * @type: the type of variable to use
- * @val: current value
- * @lo: minimum allowable value
- * @hi: maximum allowable value
- *
- * This macro does no typechecking and uses temporary variables of type
- * @type to make all the comparisons.
- */
-#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
-
-/**
- * clamp_val - return a value clamped to a given range using val's type
- * @val: current value
- * @lo: minimum allowable value
- * @hi: maximum allowable value
- *
- * This macro does no typechecking and uses temporary variables of whatever
- * type the input argument @val is. This is useful when @val is an unsigned
- * type and @lo and @hi are literals that will otherwise be assigned a signed
- * integer type.
- */
-#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
-
-
-/**
- * swap - swap values of @a and @b
- * @a: first value
- * @b: second value
- */
-#define swap(a, b) \
-	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
 
 /* This counts to 12. Any more, it will return 13th argument. */
 #define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
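
min()/max()/clamp() and swap() are not removed from the kernel; they move to <linux/minmax.h>, which kernel.h now includes (see the first hunk), so existing callers keep compiling. A brief usage sketch for reference:

#include <linux/kernel.h>	/* pulls in <linux/minmax.h> */

/* Clamp a requested queue depth into the supported range; the strict
 * type check requires all three arguments to be unsigned int here. */
static unsigned int example_pick_depth(unsigned int requested)
{
	return clamp(requested, 1U, 256U);
}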