@@ -20,7 +20,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/workqueue.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/mm.h>
@@ -35,7 +35,6 @@
 #include <linux/sched/hotplug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/crash_dump.h>
-#include <linux/memblock.h>
 #include <linux/kprobes.h>
 #include <asm/asm-offsets.h>
 #include <asm/diag.h>
@@ -54,6 +53,7 @@
 #include <asm/sigp.h>
 #include <asm/idle.h>
 #include <asm/nmi.h>
+#include <asm/stacktrace.h>
 #include <asm/topology.h>
 #include "entry.h"
 
@@ -61,6 +61,7 @@
 	ec_schedule = 0,
 	ec_call_function_single,
 	ec_stop_cpu,
+	ec_mcck_pending,
 };
 
 enum {
@@ -145,7 +146,7 @@
 
 static inline int pcpu_stopped(struct pcpu *pcpu)
 {
-	u32 uninitialized_var(status);
+	u32 status;
 
 	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
 			0, &status) != SIGP_CC_STATUS_STORED)
@@ -186,36 +187,37 @@
 	pcpu_sigp_retry(pcpu, order, 0);
 }
 
-#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
-#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
-
 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 {
-	unsigned long async_stack, panic_stack;
+	unsigned long async_stack, nodat_stack;
 	struct lowcore *lc;
 
 	if (pcpu != &pcpu_devices[0]) {
 		pcpu->lowcore = (struct lowcore *)
 			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
-		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
-		panic_stack = __get_free_page(GFP_KERNEL);
-		if (!pcpu->lowcore || !panic_stack || !async_stack)
+		nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+		if (!pcpu->lowcore || !nodat_stack)
 			goto out;
 	} else {
-		async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
-		panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
+		nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
 	}
+	async_stack = stack_alloc();
+	if (!async_stack)
+		goto out;
 	lc = pcpu->lowcore;
 	memcpy(lc, &S390_lowcore, 512);
 	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
-	lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
-	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
+	lc->async_stack = async_stack + STACK_INIT_OFFSET;
+	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
 	lc->cpu_nr = cpu;
 	lc->spinlock_lockval = arch_spin_lockval(cpu);
 	lc->spinlock_index = 0;
 	lc->br_r1_trampoline = 0x07f1; /* br %r1 */
+	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
+	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
+	lc->preempt_count = PREEMPT_DISABLED;
 	if (nmi_alloc_per_cpu(lc))
-		goto out;
+		goto out_async;
 	if (vdso_alloc_per_cpu(lc))
 		goto out_mcesa;
 	lowcore_ptr[cpu] = lc;
@@ -224,31 +226,34 @@
 
 out_mcesa:
 	nmi_free_per_cpu(lc);
+out_async:
+	stack_free(async_stack);
 out:
 	if (pcpu != &pcpu_devices[0]) {
-		free_page(panic_stack);
-		free_pages(async_stack, ASYNC_ORDER);
+		free_pages(nodat_stack, THREAD_SIZE_ORDER);
 		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
 	}
 	return -ENOMEM;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
 static void pcpu_free_lowcore(struct pcpu *pcpu)
 {
+	unsigned long async_stack, nodat_stack, lowcore;
+
+	nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
+	async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
+	lowcore = (unsigned long) pcpu->lowcore;
+
 	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
 	lowcore_ptr[pcpu - pcpu_devices] = NULL;
 	vdso_free_per_cpu(pcpu->lowcore);
 	nmi_free_per_cpu(pcpu->lowcore);
+	stack_free(async_stack);
 	if (pcpu == &pcpu_devices[0])
 		return;
-	free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
-	free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
-	free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
+	free_pages(nodat_stack, THREAD_SIZE_ORDER);
+	free_pages(lowcore, LC_ORDER);
 }
-
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 {
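The two hunks above replace the separate async and panic stacks with a nodat stack plus an async stack from stack_alloc(), and both lowcore stack pointers are now seeded at STACK_INIT_OFFSET below the top of the allocation. STACK_INIT_OFFSET takes over the role of the removed ASYNC_FRAME_OFFSET/PANIC_FRAME_OFFSET macros, i.e. region size minus STACK_FRAME_OVERHEAD minus __PT_SIZE. A minimal user-space sketch of that offset arithmetic; the *_DEMO constants below are invented stand-ins for the kernel values, not the real ones:

/*
 * Illustrative sketch only, not kernel code: derive an initial stack
 * pointer as "top of region minus one call frame minus a register
 * save area", the pattern visible in the removed macros above.
 */
#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE_DEMO	(16UL * 1024)	/* stand-in for THREAD_SIZE */
#define FRAME_OVERHEAD_DEMO	160UL		/* stand-in for STACK_FRAME_OVERHEAD */
#define PT_SIZE_DEMO		336UL		/* stand-in for __PT_SIZE */

#define STACK_INIT_OFFSET_DEMO \
	(THREAD_SIZE_DEMO - FRAME_OVERHEAD_DEMO - PT_SIZE_DEMO)

int main(void)
{
	unsigned long base = (unsigned long) malloc(THREAD_SIZE_DEMO);

	if (!base)
		return 1;
	/* Stacks grow downwards, so the initial SP sits near the top. */
	printf("stack base %#lx, initial sp %#lx\n",
	       base, base + STACK_INIT_OFFSET_DEMO);
	free((void *) base);
	return 0;
}

Reserving room for one call frame plus a register save area below the top means the first function entered on the new stack can store its caller's registers without running past the allocation.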
@@ -263,7 +268,8 @@
 	lc->kernel_asce = S390_lowcore.kernel_asce;
 	lc->user_asce = S390_lowcore.kernel_asce;
 	lc->machine_flags = S390_lowcore.machine_flags;
-	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
+	lc->user_timer = lc->system_timer =
+		lc->steal_timer = lc->avg_steal_timer = 0;
 	__ctl_store(lc->cregs_save_area, 0, 15);
 	lc->cregs_save_area[1] = lc->kernel_asce;
 	lc->cregs_save_area[7] = lc->vdso_asce;
@@ -296,7 +302,7 @@
 {
 	struct lowcore *lc = pcpu->lowcore;
 
-	lc->restart_stack = lc->kernel_stack;
+	lc->restart_stack = lc->nodat_stack;
 	lc->restart_fn = (unsigned long) func;
 	lc->restart_data = (unsigned long) data;
 	lc->restart_source = -1UL;
@@ -306,15 +312,21 @@
 /*
  * Call function via PSW restart on pcpu and stop the current cpu.
  */
-static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
-			  void *data, unsigned long stack)
+static void __pcpu_delegate(void (*func)(void*), void *data)
+{
+	func(data);	/* should not return */
+}
+
+static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
+						void (*func)(void *),
+						void *data, unsigned long stack)
 {
 	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
 	unsigned long source_cpu = stap();
 
-	__load_psw_mask(PSW_KERNEL_BITS);
+	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
 	if (pcpu->address == source_cpu)
-		func(data);	/* should not return */
+		CALL_ON_STACK(__pcpu_delegate, stack, 2, func, data);
 	/* Stop target cpu (if func returns this stops the current cpu). */
 	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
 	/* Restart func on the target cpu and stop the current cpu. */
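In the hunk above, the delegate-to-the-current-CPU case no longer calls func() directly on whatever stack the caller happens to be running on: it is funnelled through __pcpu_delegate() via CALL_ON_STACK, so func(data) executes on the caller-supplied stack. A rough user-space analogue of "run func(data) on a private stack" using <ucontext.h>; this only illustrates the idea, it is not the s390 CALL_ON_STACK mechanism, and unlike the kernel path it returns to the caller:

#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

static ucontext_t main_ctx, delegate_ctx;
static void (*delegate_func)(void *);
static void *delegate_data;

/* Trampoline that runs on the private stack. */
static void delegate_trampoline(void)
{
	delegate_func(delegate_data);
	/* uc_link brings us back to main_ctx; the kernel path never returns. */
}

static void hello(void *data)
{
	printf("running \"%s\" on a private stack\n", (const char *) data);
}

int main(void)
{
	enum { STACK_SZ = 64 * 1024 };
	void *stack = malloc(STACK_SZ);

	if (!stack)
		return 1;
	delegate_func = hello;
	delegate_data = "demo";
	getcontext(&delegate_ctx);
	delegate_ctx.uc_stack.ss_sp = stack;
	delegate_ctx.uc_stack.ss_size = STACK_SZ;
	delegate_ctx.uc_link = &main_ctx;	/* resume here afterwards */
	makecontext(&delegate_ctx, delegate_trampoline, 0);
	swapcontext(&main_ctx, &delegate_ctx);	/* switch stacks and call */
	free(stack);
	return 0;
}

The same mechanism shows up again near the end of this patch, where smp_start_secondary() enters smp_init_secondary() through CALL_ON_STACK_NORETURN on the kernel stack.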
@@ -380,7 +392,7 @@
 	lc = &S390_lowcore;
 
 	pcpu_delegate(&pcpu_devices[0], func, data,
-		      lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE);
+		      lc->nodat_stack);
 }
 
 int smp_find_processor_id(u16 address)
@@ -391,6 +403,11 @@
 		if (pcpu_devices[cpu].address == address)
 			return cpu;
 	return -1;
+}
+
+void schedule_mcck_handler(void)
+{
+	pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
 }
 
 bool notrace arch_vcpu_is_preempted(int cpu)
@@ -405,14 +422,11 @@
 
 void notrace smp_yield_cpu(int cpu)
 {
-	if (MACHINE_HAS_DIAG9C) {
-		diag_stat_inc_norecursion(DIAG_STAT_X09C);
-		asm volatile("diag %0,0,0x9c"
-			     : : "d" (pcpu_devices[cpu].address));
-	} else if (MACHINE_HAS_DIAG44) {
-		diag_stat_inc_norecursion(DIAG_STAT_X044);
-		asm volatile("diag 0,0,0x44");
-	}
+	if (!MACHINE_HAS_DIAG9C)
+		return;
+	diag_stat_inc_norecursion(DIAG_STAT_X09C);
+	asm volatile("diag %0,0,0x9c"
+		     : : "d" (pcpu_devices[cpu].address));
 }
 
 /*
@@ -490,6 +504,8 @@
 		scheduler_ipi();
 	if (test_bit(ec_call_function_single, &bits))
 		generic_smp_call_function_single_interrupt();
+	if (test_bit(ec_mcck_pending, &bits))
+		s390_handle_mcck();
 }
 
 static void do_ext_call_interrupt(struct ext_code ext_code,
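The new ec_mcck_pending bit follows the existing ec_* pattern: schedule_mcck_handler(), added earlier in this patch, marks the bit on the current CPU, and the external-call handler above now dispatches it to s390_handle_mcck(). A self-contained sketch of that "set a pending bit, then consume the pending bits in the interrupt path" pattern; all names below are invented stand-ins, and the real per-CPU bit mask and its SIGP-based delivery are not shown in this excerpt:

/*
 * Illustrative sketch only: marking and dispatching pending events,
 * analogous to the ec_* bits, using C11 atomics in user space.
 */
#include <stdatomic.h>
#include <stdio.h>

enum { EC_SCHEDULE, EC_CALL_FUNCTION_SINGLE, EC_STOP_CPU, EC_MCCK_PENDING };

static _Atomic unsigned long ec_mask;	/* per-CPU in the real code */

static void ec_call(int bit)
{
	/* Mark the event; the real code then kicks the CPU to take an interrupt. */
	atomic_fetch_or(&ec_mask, 1UL << bit);
}

static void handle_ext_call(void)
{
	/* Grab and clear all pending bits in one shot, then dispatch. */
	unsigned long bits = atomic_exchange(&ec_mask, 0);

	if (bits & (1UL << EC_SCHEDULE))
		printf("reschedule requested\n");
	if (bits & (1UL << EC_MCCK_PENDING))
		printf("deferred machine-check handling\n");
}

int main(void)
{
	ec_call(EC_MCCK_PENDING);
	handle_ext_call();
	return 0;
}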
@@ -591,14 +607,14 @@
 /*
  * Collect CPU state of the previous, crashed system.
  * There are four cases:
- * 1) standard zfcp dump
- *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
+ * 1) standard zfcp/nvme dump
+ *    condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
  *    The state for all CPUs except the boot CPU needs to be collected
  *    with sigp stop-and-store-status. The boot CPU state is located in
  *    the absolute lowcore of the memory stored in the HSA. The zcore code
  *    will copy the boot CPU state from the HSA.
- * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
- *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
+ * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
+ *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
  *    The state for all CPUs except the boot CPU needs to be collected
  *    with sigp stop-and-store-status. The firmware or the boot-loader
  *    stored the registers of the boot CPU in the absolute lowcore in the
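Only the first two of the four cases described above are visible in this excerpt; both are selected by the same two predicates, OLDMEM_BASE and is_ipl_type_dump(). A hypothetical helper, purely for illustration and not part of the patch, that mirrors the two visible conditions:

/* Hypothetical illustration of the two dump cases visible above. */
#include <stdbool.h>
#include <stdio.h>

static const char *dump_case(bool oldmem_base_set, bool ipl_is_dump)
{
	if (!oldmem_base_set && ipl_is_dump)
		return "1) standard zfcp/nvme dump";
	if (oldmem_base_set && ipl_is_dump)
		return "2) stand-alone kdump for SCSI/NVMe";
	return "another case (not shown in this excerpt)";
}

int main(void)
{
	printf("%s\n", dump_case(false, true));
	printf("%s\n", dump_case(true, true));
	return 0;
}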
@@ -645,11 +661,15 @@
 	unsigned long page;
 	bool is_boot_cpu;
 
-	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
+	if (!(OLDMEM_BASE || is_ipl_type_dump()))
 		/* No previous system present, normal boot. */
 		return;
 	/* Allocate a page as dumping area for the store status sigps */
-	page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
+	page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
+	if (!page)
+		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
+		      PAGE_SIZE, 1UL << 31);
+
 	/* Set multi-threading state to the previous system. */
 	pcpu_set_smt(sclp.mtid_prev);
 	boot_cpu_addr = stap();
@@ -667,7 +687,7 @@
 			/* Get the vector registers */
 			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
 		/*
-		 * For a zfcp dump OLDMEM_BASE == NULL and the registers
+		 * For a zfcp/nvme dump OLDMEM_BASE == NULL and the registers
 		 * of the boot CPU are stored in the HSA. To retrieve
 		 * these registers an SCLP request is required which is
 		 * done by drivers/s390/char/zcore.c:init_cpu_info()
@@ -677,7 +697,7 @@
 			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
 	}
 	memblock_free(page, PAGE_SIZE);
-	diag308_reset();
+	diag_dma_ops.diag308_reset();
 	pcpu_set_smt(0);
 }
 #endif /* CONFIG_CRASH_DUMP */
@@ -690,6 +710,11 @@
 int smp_cpu_get_polarization(int cpu)
 {
 	return pcpu_devices[cpu].polarization;
+}
+
+int smp_cpu_get_cpu_address(int cpu)
+{
+	return pcpu_devices[cpu].address;
 }
 
 static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
@@ -786,7 +811,10 @@
 	u16 address;
 
 	/* Get CPU information */
-	info = memblock_virt_alloc(sizeof(*info), 8);
+	info = memblock_alloc(sizeof(*info), 8);
+	if (!info)
+		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+		      __func__, sizeof(*info), 8);
 	smp_get_core_info(info, 1);
 	/* Find boot CPU type */
 	if (sclp.has_core_type) {
@@ -826,26 +854,16 @@
 	memblock_free_early((unsigned long)info, sizeof(*info));
 }
 
-/*
- * Activate a secondary processor.
- */
-static void smp_start_secondary(void *cpuvoid)
+static void smp_init_secondary(void)
 {
 	int cpu = raw_smp_processor_id();
 
 	S390_lowcore.last_update_clock = get_tod_clock();
-	S390_lowcore.restart_stack = (unsigned long) restart_stack;
-	S390_lowcore.restart_fn = (unsigned long) do_restart;
-	S390_lowcore.restart_data = 0;
-	S390_lowcore.restart_source = -1UL;
 	restore_access_regs(S390_lowcore.access_regs_save_area);
-	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
-	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
 	set_cpu_flag(CIF_ASCE_PRIMARY);
 	set_cpu_flag(CIF_ASCE_SECONDARY);
 	cpu_init();
 	rcu_cpu_starting(cpu);
-	preempt_disable();
 	init_cpu_timer();
 	vtime_init();
 	pfault_init();
@@ -855,9 +873,24 @@
 	else
 		clear_cpu_flag(CIF_DEDICATED_CPU);
 	set_cpu_online(cpu, true);
+	update_cpu_masks();
 	inc_irq_stat(CPU_RST);
 	local_irq_enable();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+/*
+ * Activate a secondary processor.
+ */
+static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
+{
+	S390_lowcore.restart_stack = (unsigned long) restart_stack;
+	S390_lowcore.restart_fn = (unsigned long) do_restart;
+	S390_lowcore.restart_data = 0;
+	S390_lowcore.restart_source = -1UL;
+	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
+	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
+	CALL_ON_STACK_NORETURN(smp_init_secondary, S390_lowcore.kernel_stack);
 }
 
 /* Upping and downing of CPUs */
@@ -893,8 +926,6 @@
 }
 early_param("possible_cpus", _setup_possible_cpus);
 
-#ifdef CONFIG_HOTPLUG_CPU
-
 int __cpu_disable(void)
 {
 	unsigned long cregs[16];
@@ -902,6 +933,7 @@
 	/* Handle possible pending IPIs */
 	smp_handle_ext_call();
 	set_cpu_online(smp_processor_id(), false);
+	update_cpu_masks();
 	/* Disable pseudo page faults on this cpu. */
 	pfault_fini();
 	/* Disable interrupt sources via control register. */
@@ -934,8 +966,6 @@
 	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
 	for (;;) ;
 }
-
-#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init smp_fill_possible_mask(void)
 {
@@ -971,10 +1001,6 @@
 	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
 }
 
-void __init smp_cpus_done(unsigned int max_cpus)
-{
-}
-
 void __init smp_setup_processor_id(void)
 {
 	pcpu_devices[0].address = stap();
@@ -994,7 +1020,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static ssize_t cpu_configure_show(struct device *dev,
 				  struct device_attribute *attr, char *buf)
 {
@@ -1071,7 +1096,6 @@
 	return rc ? rc : count;
 }
 static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static ssize_t show_cpu_address(struct device *dev,
 				struct device_attribute *attr, char *buf)
@@ -1081,9 +1105,7 @@
 static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
 
 static struct attribute *cpu_common_attrs[] = {
-#ifdef CONFIG_HOTPLUG_CPU
 	&dev_attr_configure.attr,
-#endif
 	&dev_attr_address.attr,
 	NULL,
 };
@@ -1108,6 +1130,7 @@
 
 	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
 }
+
 static int smp_cpu_pre_down(unsigned int cpu)
 {
 	struct device *s = &per_cpu(cpu_device, cpu)->dev;
@@ -1142,14 +1165,10 @@
 out_topology:
 	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
 out_cpu:
-#ifdef CONFIG_HOTPLUG_CPU
 	unregister_cpu(c);
-#endif
 out:
 	return rc;
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
 
 int __ref smp_rescan_cpus(void)
 {
@@ -1186,17 +1205,14 @@
 	return rc ? rc : count;
 }
 static DEVICE_ATTR_WO(rescan);
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static int __init s390_smp_init(void)
 {
 	int cpu, rc = 0;
 
-#ifdef CONFIG_HOTPLUG_CPU
 	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
 	if (rc)
 		return rc;
-#endif
 	for_each_present_cpu(cpu) {
 		rc = smp_add_present_cpu(cpu);
 		if (rc)