2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/arch/x86/kernel/irq_32.c
@@ -51,8 +51,8 @@
 static inline void print_stack_overflow(void) { }
 #endif
 
-DEFINE_PER_CPU(struct irq_stack *, hardirq_stack);
-DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
+DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
+DEFINE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
 
 static void call_on_stack(void *func, void *stack)
 {
@@ -76,7 +76,7 @@
 	u32 *isp, *prev_esp, arg1;
 
 	curstk = (struct irq_stack *) current_stack();
-	irqstk = __this_cpu_read(hardirq_stack);
+	irqstk = __this_cpu_read(hardirq_stack_ptr);
 
 	/*
 	 * this is where we switch to the IRQ stack. However, if we are
@@ -107,36 +107,36 @@
 }
 
 /*
- * allocate per-cpu stacks for hardirq and for softirq processing
+ * Allocate per-cpu stacks for hardirq and softirq processing
  */
-void irq_ctx_init(int cpu)
+int irq_init_percpu_irqstack(unsigned int cpu)
 {
-	struct irq_stack *irqstk;
+	int node = cpu_to_node(cpu);
+	struct page *ph, *ps;
 
-	if (per_cpu(hardirq_stack, cpu))
-		return;
+	if (per_cpu(hardirq_stack_ptr, cpu))
+		return 0;
 
-	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
-					       THREADINFO_GFP,
-					       THREAD_SIZE_ORDER));
-	per_cpu(hardirq_stack, cpu) = irqstk;
+	ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
+	if (!ph)
+		return -ENOMEM;
+	ps = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
+	if (!ps) {
+		__free_pages(ph, THREAD_SIZE_ORDER);
+		return -ENOMEM;
+	}
 
-	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
-					       THREADINFO_GFP,
-					       THREAD_SIZE_ORDER));
-	per_cpu(softirq_stack, cpu) = irqstk;
-
-	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
-	       cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
+	per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
+	per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
+	return 0;
 }
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 void do_softirq_own_stack(void)
 {
 	struct irq_stack *irqstk;
 	u32 *isp, *prev_esp;
 
-	irqstk = __this_cpu_read(softirq_stack);
+	irqstk = __this_cpu_read(softirq_stack_ptr);
 
 	/* build the stack frame on the softirq stack */
 	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
@@ -147,20 +147,14 @@
 
 	call_on_stack(__do_softirq, isp);
 }
-#endif
 
-bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
+void __handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 {
 	int overflow = check_stack_overflow();
-
-	if (IS_ERR_OR_NULL(desc))
-		return false;
 
 	if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
 		if (unlikely(overflow))
 			print_stack_overflow();
 		generic_handle_irq_desc(desc);
 	}
-
-	return true;
 }
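
A minimal caller sketch (an assumption, not part of the patch above): since irq_init_percpu_irqstack() now returns an int and fails with -ENOMEM when either per-CPU stack cannot be allocated, a bringup path is expected to check the result before the CPU starts taking interrupts. The helper name below is hypothetical.

	/* Hypothetical bringup helper: refuse to online the CPU if its
	 * IRQ stacks cannot be allocated, instead of proceeding with a
	 * NULL hardirq_stack_ptr/softirq_stack_ptr. */
	static int cpu_irqstack_prepare(unsigned int cpu)
	{
		int ret;

		ret = irq_init_percpu_irqstack(cpu);	/* may return -ENOMEM */
		if (ret)
			pr_err("CPU %u: IRQ stack allocation failed\n", cpu);
		return ret;
	}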