@@ -51,8 +51,8 @@
 static inline void print_stack_overflow(void) { }
 #endif
 
-DEFINE_PER_CPU(struct irq_stack *, hardirq_stack);
-DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
+DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
+DEFINE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
 
 static void call_on_stack(void *func, void *stack)
 {
@@ -76,7 +76,7 @@
 	u32 *isp, *prev_esp, arg1;
 
 	curstk = (struct irq_stack *) current_stack();
-	irqstk = __this_cpu_read(hardirq_stack);
+	irqstk = __this_cpu_read(hardirq_stack_ptr);
 
 	/*
 	 * this is where we switch to the IRQ stack. However, if we are
@@ -107,36 +107,36 @@
 }
 
 /*
- * allocate per-cpu stacks for hardirq and for softirq processing
+ * Allocate per-cpu stacks for hardirq and softirq processing
  */
-void irq_ctx_init(int cpu)
+int irq_init_percpu_irqstack(unsigned int cpu)
 {
-	struct irq_stack *irqstk;
+	int node = cpu_to_node(cpu);
+	struct page *ph, *ps;
 
-	if (per_cpu(hardirq_stack, cpu))
-		return;
+	if (per_cpu(hardirq_stack_ptr, cpu))
+		return 0;
 
-	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
-					       THREADINFO_GFP,
-					       THREAD_SIZE_ORDER));
-	per_cpu(hardirq_stack, cpu) = irqstk;
+	ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
+	if (!ph)
+		return -ENOMEM;
+	ps = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
+	if (!ps) {
+		__free_pages(ph, THREAD_SIZE_ORDER);
+		return -ENOMEM;
+	}
 
-	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
-					       THREADINFO_GFP,
-					       THREAD_SIZE_ORDER));
-	per_cpu(softirq_stack, cpu) = irqstk;
-
-	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
-	       cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
+	per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
+	per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
+	return 0;
 }
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 void do_softirq_own_stack(void)
 {
 	struct irq_stack *irqstk;
 	u32 *isp, *prev_esp;
 
-	irqstk = __this_cpu_read(softirq_stack);
+	irqstk = __this_cpu_read(softirq_stack_ptr);
 
 	/* build the stack frame on the softirq stack */
 	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
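The new allocation path above follows the usual unwind-on-partial-failure pattern: if the softirq stack cannot be allocated, the already-allocated hardirq stack is freed before -ENOMEM is propagated, so a failed attempt leaks nothing and leaves the per-CPU pointers untouched. A minimal userspace C sketch of the same pattern follows; struct stack_pair and alloc_stack_pair() are hypothetical names invented for illustration (the kernel code uses alloc_pages_node()/__free_pages()):

#include <stdlib.h>
#include <errno.h>

#define STACK_SIZE 8192			/* stand-in for THREAD_SIZE */

struct stack_pair {			/* hypothetical pair of stacks */
	void *hard;
	void *soft;
};

/*
 * Allocate both stacks or neither: on a partial failure, roll back
 * the first allocation before reporting -ENOMEM, mirroring
 * irq_init_percpu_irqstack() above.
 */
static int alloc_stack_pair(struct stack_pair *sp)
{
	sp->hard = malloc(STACK_SIZE);
	if (!sp->hard)
		return -ENOMEM;
	sp->soft = malloc(STACK_SIZE);
	if (!sp->soft) {
		free(sp->hard);		/* unwind the partial success */
		sp->hard = NULL;
		return -ENOMEM;
	}
	return 0;
}

int main(void)
{
	struct stack_pair sp;

	return alloc_stack_pair(&sp) ? 1 : 0;
}

Returning an int instead of void is the point of the rename from irq_ctx_init(): the caller can now fail that CPU's setup cleanly instead of discovering a missing stack at interrupt time.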
@@ -147,20 +147,14 @@
 
 	call_on_stack(__do_softirq, isp);
 }
-#endif
 
-bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
+void __handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 {
 	int overflow = check_stack_overflow();
-
-	if (IS_ERR_OR_NULL(desc))
-		return false;
 
 	if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
 		if (unlikely(overflow))
 			print_stack_overflow();
 		generic_handle_irq_desc(desc);
 	}
-
-	return true;
 }
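With the IS_ERR_OR_NULL() check and the bool return dropped, __handle_irq() now assumes its caller has already validated the descriptor, and there is no failure left to report, hence the void return. A self-contained sketch of that division of labor, using an ERR_PTR-style encoding; lookup_desc(), handler() and dispatch() are hypothetical stand-ins for the real vector lookup and __handle_irq(), not kernel APIs:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* ERR_PTR-style encoding: small negative errnos stored in a pointer */
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define IS_ERR_OR_NULL(p) \
	((p) == NULL || (uintptr_t)(p) >= (uintptr_t)-4095)

struct irq_desc { int irq; };

static struct irq_desc descs[2] = { { 10 }, { 11 } };

/* hypothetical stand-in for the per-vector descriptor lookup */
static struct irq_desc *lookup_desc(int vector)
{
	if (vector < 0 || vector >= 2)
		return ERR_PTR(-EINVAL);
	return &descs[vector];
}

/* like __handle_irq(): trusts the caller, so it can return void */
static void handler(struct irq_desc *desc)
{
	printf("handling irq %d\n", desc->irq);
}

/* the caller owns the validity check, as in the patched code */
static void dispatch(int vector)
{
	struct irq_desc *desc = lookup_desc(vector);

	if (IS_ERR_OR_NULL(desc))
		return;		/* spurious vector: nothing to handle */
	handler(desc);
}

int main(void)
{
	dispatch(1);		/* valid: handled */
	dispatch(7);		/* invalid: silently ignored */
	return 0;
}

Hoisting the check into the single caller keeps the inner function's "cannot fail" contract explicit and avoids re-validating the descriptor on every path that already knows it is good.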