From d0a428a6556ea5a006e22e28b0b1cd037885fe20 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 22 Nov 2023 01:07:12 +0000
Subject: [PATCH] add gmac0 read mac from eeprom, pcie30x1
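
This extract covers kernel/include/linux/sched.h, which gains the
PREEMPT_RT task state support:

- turn the task_is_traced()/task_is_stopped_or_traced() macros into
  functions that also check the new task_struct::saved_state field
  under pi_lock
- add __set_current_state_no_track() for state writes that bypass the
  usual tracking
- extend task_struct with saved_state, cpus_ptr/cpus_mask,
  migrate_disable bookkeeping, posix_timer_list, sigqueue_cache,
  forced_info, wake_q_sleeper, softirq accounting, kmap state,
  sleeping_lock and xmit_recursion, under the matching
  CONFIG_PREEMPT_RT_BASE/CONFIG_PREEMPT_RT_FULL guards
- add PF_IN_SOFTIRQ, wake_up_lock_sleeper(), cpu_nr_pinned(),
  takedown_cpu_task, the CONFIG_PREEMPT_LAZY need_resched helpers and
  sleeping_lock_inc()/sleeping_lock_dec()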
---
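Note: below is a minimal sketch (not part of the patch) of how an
rtmutex-based "sleeping spinlock" slow path is expected to use the new
saved_state field and __set_current_state_no_track(); the function name
is illustrative, not code from this tree.

    #include <linux/sched.h>

    /* Illustrative only: stash the caller's real wait state while
     * blocking on a sleeping lock, then restore it after wakeup. */
    static void rt_lock_sleep_sketch(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&current->pi_lock, flags);
            current->saved_state = current->state;  /* real wait state */
            __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
            raw_spin_unlock_irqrestore(&current->pi_lock, flags);

            schedule();  /* a wake_up_lock_sleeper() ends this sleep */

            raw_spin_lock_irqsave(&current->pi_lock, flags);
            __set_current_state_no_track(current->saved_state);
            current->saved_state = TASK_RUNNING;
            raw_spin_unlock_irqrestore(&current->pi_lock, flags);
    }
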
kernel/include/linux/sched.h | 162 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 157 insertions(+), 5 deletions(-)
diff --git a/kernel/include/linux/sched.h b/kernel/include/linux/sched.h
index 497cfcc..85ddd3b 100644
--- a/kernel/include/linux/sched.h
+++ b/kernel/include/linux/sched.h
@@ -30,6 +30,7 @@
#include <linux/task_io_accounting.h>
#include <linux/rseq.h>
#include <linux/android_kabi.h>
+#include <asm/kmap_types.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
@@ -103,11 +104,7 @@
__TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
TASK_PARKED)
-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
-
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
-
-#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
@@ -136,6 +133,9 @@
smp_store_mb(current->state, (state_value)); \
} while (0)
+#define __set_current_state_no_track(state_value) \
+ current->state = (state_value);
+
#define set_special_state(state_value) \
do { \
unsigned long flags; /* may shadow */ \
@@ -145,6 +145,7 @@
current->state = (state_value); \
raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
} while (0)
+
#else
/*
* set_current_state() includes a barrier so that the write of current->state
@@ -189,6 +190,9 @@
#define set_current_state(state_value) \
smp_store_mb(current->state, (state_value))
+#define __set_current_state_no_track(state_value) \
+ __set_current_state(state_value)
+
/*
* set_special_state() should be used for those states when the blocking task
* can not use the regular condition based wait-loop. In that case we must
@@ -224,6 +228,8 @@
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
+
+int cpu_nr_pinned(int cpu);
/**
* struct prev_cputime - snapshot of system and user cputime
@@ -654,6 +660,8 @@
#endif
/* -1 unrunnable, 0 runnable, >0 stopped: */
volatile long state;
+ /* saved state for "spinlock sleepers" */
+ volatile long saved_state;
/*
* This begins the randomizable portion of task_struct. Only
@@ -728,8 +736,24 @@
unsigned int policy;
int nr_cpus_allowed;
- cpumask_t cpus_allowed;
+// cpumask_t cpus_allowed;
cpumask_t cpus_requested;
+ const cpumask_t *cpus_ptr;
+ cpumask_t cpus_mask;
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+ int migrate_disable;
+ bool migrate_disable_scheduled;
+# ifdef CONFIG_SCHED_DEBUG
+ int pinned_on_cpu;
+# endif
+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+# ifdef CONFIG_SCHED_DEBUG
+ int migrate_disable;
+# endif
+#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int sleeping_lock;
+#endif
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
@@ -901,6 +925,9 @@
#ifdef CONFIG_POSIX_TIMERS
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
+#ifdef CONFIG_PREEMPT_RT_BASE
+ struct task_struct *posix_timer_list;
+#endif
#endif
/* Process credentials: */
@@ -945,11 +972,17 @@
/* Signal handlers: */
struct signal_struct *signal;
struct sighand_struct *sighand;
+ struct sigqueue *sigqueue_cache;
+
sigset_t blocked;
sigset_t real_blocked;
/* Restored if set_restore_sigmask() was used: */
sigset_t saved_sigmask;
struct sigpending pending;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /* TODO: move me into ->restart_block ? */
+ struct siginfo forced_info;
+#endif
unsigned long sas_ss_sp;
size_t sas_ss_size;
unsigned int sas_ss_flags;
@@ -974,6 +1007,7 @@
raw_spinlock_t pi_lock;
struct wake_q_node wake_q;
+ struct wake_q_node wake_q_sleeper;
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task: */
@@ -1271,8 +1305,22 @@
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+ struct rcu_head put_rcu;
+ int softirq_nestcnt;
+ unsigned int softirqs_raised;
+#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
+ int kmap_idx;
+ pte_t kmap_pte[KM_TYPE_NR];
+# endif
+#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
+#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int xmit_recursion;
#endif
int pagefault_disabled;
#ifdef CONFIG_MMU
@@ -1490,6 +1538,7 @@
/*
* Per process flags
*/
+#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
@@ -1717,6 +1766,7 @@
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_lock_sleeper(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
@@ -1799,6 +1849,89 @@
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
+#ifdef CONFIG_PREEMPT_LAZY
+static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
+}
+
+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
+}
+
+static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
+}
+
+static inline int need_resched_lazy(void)
+{
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+}
+
+static inline int need_resched_now(void)
+{
+ return test_thread_flag(TIF_NEED_RESCHED);
+}
+
+#else
+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
+static inline int need_resched_lazy(void) { return 0; }
+
+static inline int need_resched_now(void)
+{
+ return test_thread_flag(TIF_NEED_RESCHED);
+}
+
+#endif
+
+
+static inline bool __task_is_stopped_or_traced(struct task_struct *task)
+{
+ if (task->state & (__TASK_STOPPED | __TASK_TRACED))
+ return true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
+ return true;
+#endif
+ return false;
+}
+
+static inline bool task_is_stopped_or_traced(struct task_struct *task)
+{
+ bool traced_stopped;
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+ traced_stopped = __task_is_stopped_or_traced(task);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+#else
+ traced_stopped = __task_is_stopped_or_traced(task);
+#endif
+ return traced_stopped;
+}
+
+static inline bool task_is_traced(struct task_struct *task)
+{
+ bool traced = false;
+
+ if (task->state & __TASK_TRACED)
+ return true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /* in case the task is sleeping on tasklist_lock */
+ raw_spin_lock_irq(&task->pi_lock);
+ if (task->state & __TASK_TRACED)
+ traced = true;
+ else if (task->saved_state & __TASK_TRACED)
+ traced = true;
+ raw_spin_unlock_irq(&task->pi_lock);
+#endif
+ return traced;
+}
+
/*
* cond_resched() and cond_resched_lock(): latency reduction via
* explicit rescheduling in places that are safe. The return
@@ -1850,6 +1983,23 @@
{
return unlikely(tif_need_resched());
}
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void sleeping_lock_inc(void)
+{
+ current->sleeping_lock++;
+}
+
+static inline void sleeping_lock_dec(void)
+{
+ current->sleeping_lock--;
+}
+
+#else
+
+static inline void sleeping_lock_inc(void) { }
+static inline void sleeping_lock_dec(void) { }
+#endif
/*
* Wrappers for p->thread_info->cpu access. No-op on UP.
@@ -2022,4 +2172,6 @@
#endif
+extern struct task_struct *takedown_cpu_task;
+
#endif
--
Gitblit v1.6.2