From a5969cabbb4660eab42b6ef0412cbbd1200cf14d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 12 Oct 2024 07:10:09 +0000
Subject: [PATCH] Change led to gpio

---
 kernel/arch/x86/include/asm/preempt.h | 48 ++++++++++--------------------------------------
 1 files changed, 10 insertions(+), 38 deletions(-)

diff --git a/kernel/arch/x86/include/asm/preempt.h b/kernel/arch/x86/include/asm/preempt.h
index afa0e42..a334dd0 100644
--- a/kernel/arch/x86/include/asm/preempt.h
+++ b/kernel/arch/x86/include/asm/preempt.h
@@ -8,6 +8,9 @@
 
 DECLARE_PER_CPU(int, __preempt_count);
 
+/* We use the MSB mostly because its available */
+#define PREEMPT_NEED_RESCHED	0x80000000
+
 /*
  * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
  * that a decrement hitting 0 means we can and should reschedule.
@@ -40,7 +43,7 @@
 #define init_task_preempt_count(p) do { } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
+	per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
 } while (0)
 
 /*
@@ -86,24 +89,9 @@
  * a decrement which hits zero means we have no preempt_count and should
  * reschedule.
  */
-static __always_inline bool ____preempt_count_dec_and_test(void)
-{
-	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
-}
-
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	if (____preempt_count_dec_and_test())
-		return true;
-#ifdef CONFIG_PREEMPT_LAZY
-	if (preempt_count())
-		return false;
-	if (current_thread_info()->preempt_lazy_count)
-		return false;
-	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-#else
-	return false;
-#endif
+	return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
 }
 
 /*
@@ -111,34 +99,18 @@
  */
 static __always_inline bool should_resched(int preempt_offset)
 {
-#ifdef CONFIG_PREEMPT_LAZY
-	u32 tmp;
-
-	tmp = raw_cpu_read_4(__preempt_count);
-	if (tmp == preempt_offset)
-		return true;
-
-	/* preempt count == 0 ? */
-	tmp &= ~PREEMPT_NEED_RESCHED;
-	if (tmp != preempt_offset)
-		return false;
-	if (current_thread_info()->preempt_lazy_count)
-		return false;
-	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-#else
 	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
-#endif
 }
 
-#ifdef CONFIG_PREEMPT
-  extern asmlinkage void ___preempt_schedule(void);
+#ifdef CONFIG_PREEMPTION
+  extern asmlinkage void preempt_schedule_thunk(void);
 # define __preempt_schedule() \
-	asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)
+	asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT)
 
   extern asmlinkage void preempt_schedule(void);
 
-  extern asmlinkage void ___preempt_schedule_notrace(void);
+  extern asmlinkage void preempt_schedule_notrace_thunk(void);
 # define __preempt_schedule_notrace() \
-	asm volatile ("call ___preempt_schedule_notrace" : ASM_CALL_CONSTRAINT)
+	asm volatile ("call preempt_schedule_notrace_thunk" : ASM_CALL_CONSTRAINT)
   extern asmlinkage void preempt_schedule_notrace(void);
 #endif
-- 
Gitblit v1.6.2
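
A minimal user-space sketch of the inverted PREEMPT_NEED_RESCHED encoding described in the context comments above: the MSB stays set while no reschedule is needed, so a single "decrement and test for zero" covers both "preempt count dropped to zero" and "reschedule requested". The model_* names and the plain ++/-- in place of the kernel's per-CPU decl instruction are assumptions for illustration only, not kernel code.

/* Build: cc -o preempt_model preempt_model.c && ./preempt_model */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the define added by the patch: the MSB of the preempt count. */
#define MODEL_PREEMPT_NEED_RESCHED	0x80000000u

/*
 * The bit is kept *set* while no reschedule is needed, so the whole word
 * only reaches zero when the preempt count is zero AND a reschedule was
 * requested (bit cleared) -- the "decrement hitting 0" case above.
 */
static uint32_t model_preempt_count = MODEL_PREEMPT_NEED_RESCHED;

static void model_set_need_resched(void)	/* a reschedule was requested */
{
	model_preempt_count &= ~MODEL_PREEMPT_NEED_RESCHED;
}

static void model_clear_need_resched(void)	/* request handled */
{
	model_preempt_count |= MODEL_PREEMPT_NEED_RESCHED;
}

static void model_preempt_disable(void)
{
	model_preempt_count++;		/* kernel: __preempt_count_add(1) */
}

/* Models __preempt_count_dec_and_test(): true only when the word hits 0. */
static bool model_preempt_enable_and_test(void)
{
	return --model_preempt_count == 0;	/* kernel: GEN_UNARY_RMWcc("decl", ...) */
}

int main(void)
{
	model_preempt_disable();	/* count = NEED_RESCHED bit | 1 */
	model_set_need_resched();	/* count = 1: bit cleared */
	printf("reschedule now? %s\n",
	       model_preempt_enable_and_test() ? "yes" : "no");	/* prints "yes" */
	model_clear_need_resched();	/* back to "enabled, no resched needed" */
	return 0;
}

Keeping the flag inverted is what lets __preempt_count_dec_and_test() fold both checks into the single zero-flag test that GEN_UNARY_RMWcc emits after the decl.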