 .. |  .. |
  4 |   4 |  #include <linux/kernel.h>
  5 |   5 |  #include <linux/export.h>
  6 |   6 |  #include <linux/init.h>
  7 |     | -#include <linux/bootmem.h>
    |   7 | +#include <linux/memblock.h>
  8 |   8 |  #include <linux/percpu.h>
  9 |   9 |  #include <linux/kexec.h>
 10 |  10 |  #include <linux/crash_dump.h>
 .. |  .. |
106 | 106 |  	void *ptr;
107 | 107 |
108 | 108 |  	if (!node_online(node) || !NODE_DATA(node)) {
109 |     | -		ptr = __alloc_bootmem_nopanic(size, align, goal);
    | 109 | +		ptr = memblock_alloc_from(size, align, goal);
110 | 110 |  		pr_info("cpu %d has no node %d or node-local memory\n",
111 | 111 |  			cpu, node);
112 | 112 |  		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
113 | 113 |  			 cpu, size, __pa(ptr));
114 | 114 |  	} else {
115 |     | -		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
116 |     | -						   size, align, goal);
    | 115 | +		ptr = memblock_alloc_try_nid(size, align, goal,
    | 116 | +					     MEMBLOCK_ALLOC_ACCESSIBLE,
    | 117 | +					     node);
    | 118 | +
117 | 119 |  		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
118 | 120 |  			 cpu, size, node, __pa(ptr));
119 | 121 |  	}
120 | 122 |  	return ptr;
121 | 123 | #else
122 |     | -	return __alloc_bootmem_nopanic(size, align, goal);
    | 124 | +	return memblock_alloc_from(size, align, goal);
123 | 125 | #endif
124 | 126 | }
125 | 127 |
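For readers tracking the API swap above: __alloc_bootmem_nopanic() and __alloc_bootmem_node_nopanic() are replaced by memblock_alloc_from(), which only honours a minimum physical address ("goal"), and memblock_alloc_try_nid(), which additionally prefers a NUMA node and bounds the search with MEMBLOCK_ALLOC_ACCESSIBLE. A minimal sketch of that pattern follows; the helper name alloc_near_node() and the exact set of includes are illustrative, not part of this patch.

```c
#include <linux/memblock.h>
#include <linux/mmzone.h>	/* NODE_DATA(), node_online() */

/*
 * Illustrative helper (not from this patch): allocate boot-time memory
 * near @node, falling back to any node when @node is offline or has no
 * node-local memory, mirroring the pcpu_fc_alloc() hunk above.
 */
static void * __init alloc_near_node(int node, unsigned long size,
				     unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);

	if (!node_online(node) || !NODE_DATA(node))
		/* No usable node: take any memory above @goal. */
		return memblock_alloc_from(size, align, goal);

	/* Prefer @node; memblock may still fall back to other nodes. */
	return memblock_alloc_try_nid(size, align, goal,
				      MEMBLOCK_ALLOC_ACCESSIBLE, node);
}
```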
 .. |  .. |
133 | 135 |
134 | 136 | static void __init pcpu_fc_free(void *ptr, size_t size)
135 | 137 | {
136 |     | -	free_bootmem(__pa(ptr), size);
    | 138 | +	memblock_free(__pa(ptr), size);
137 | 139 | }
138 | 140 |
139 | 141 | static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
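The matching free path: the __pa(ptr) conversion in the hunk above implies that, in this tree, memblock_free() takes a physical address plus a size rather than the virtual pointer returned by the allocation helpers. A one-line wrapper, purely as a usage illustration:

```c
/*
 * Illustrative only: release a boot-time allocation obtained from the
 * memblock allocator, converting the virtual address back to a physical
 * one exactly as pcpu_fc_free() does above.
 */
static void __init free_boot_area(void *ptr, size_t size)
{
	if (ptr)
		memblock_free(__pa(ptr), size);
}
```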
 .. |  .. |
169 | 171 |  	unsigned long delta;
170 | 172 |  	int rc;
171 | 173 |
172 |     | -	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%d\n",
    | 174 | +	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%u\n",
173 | 175 |  		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
174 | 176 |
175 | 177 |  	/*
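The %d to %u switch for nr_node_ids in the hunk above suggests the variable is declared unsigned int in this tree (nr_cpu_ids already is, hence its existing %u). A hedged illustration of keeping the printk specifiers in sync with the declared types; the declarations shown are assumptions about this kernel version, and the message itself is copied from new line 174:

```c
/* Assumed declarations in this tree: both counters are unsigned int,
 * so %u is the matching specifier for each of them.
 */
extern unsigned int nr_cpu_ids;
extern unsigned int nr_node_ids;

static void __init report_cpu_and_node_ids(void)
{
	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%u\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
}
```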
 .. |  .. |
205 | 207 |  					    pcpu_cpu_distance,
206 | 208 |  					    pcpu_fc_alloc, pcpu_fc_free);
207 | 209 |  		if (rc < 0)
208 |     | -			pr_warning("%s allocator failed (%d), falling back to page size\n",
209 |     | -				   pcpu_fc_names[pcpu_chosen_fc], rc);
    | 210 | +			pr_warn("%s allocator failed (%d), falling back to page size\n",
    | 211 | +				pcpu_fc_names[pcpu_chosen_fc], rc);
210 | 212 |  	}
211 | 213 |  	if (rc < 0)
212 | 214 |  		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
 .. |  .. |
241 | 243 | #ifdef CONFIG_X86_32
242 | 244 |  		per_cpu(x86_cpu_to_logical_apicid, cpu) =
243 | 245 |  			early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
244 |     | -#endif
245 |     | -#ifdef CONFIG_X86_64
246 |     | -		per_cpu(irq_stack_ptr, cpu) =
247 |     | -			per_cpu(irq_stack_union.irq_stack, cpu) +
248 |     | -			IRQ_STACK_SIZE;
249 | 246 | #endif
250 | 247 | #ifdef CONFIG_NUMA
251 | 248 |  		per_cpu(x86_cpu_to_node_map, cpu) =
 .. |  .. |
290 | 287 |  	/*
291 | 288 |  	 * Sync back kernel address range again. We already did this in
292 | 289 |  	 * setup_arch(), but percpu data also needs to be available in
293 |     | -	 * the smpboot asm. We can't reliably pick up percpu mappings
294 |     | -	 * using vmalloc_fault(), because exception dispatch needs
295 |     | -	 * percpu data.
    | 290 | +	 * the smpboot asm and arch_sync_kernel_mappings() doesn't sync to
    | 291 | +	 * swapper_pg_dir on 32-bit. The per-cpu mappings need to be available
    | 292 | +	 * there too.
296 | 293 |  	 *
297 | 294 |  	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
298 | 295 |  	 * this call?