| old | new | line |
|---|---|---|
| .. | .. | |
| | 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
| 1 | 2 | /* |
| 2 | 3 | * pseries CPU Hotplug infrastructure. |
| 3 | 4 | * |
| .. | .. | |
| 11 | 12 | * Plus various changes from other IBM teams... |
| 12 | 13 | * |
| 13 | 14 | * Copyright (C) 2006 Michael Ellerman, IBM Corporation |
| 14 | | - * |
| 15 | | - * This program is free software; you can redistribute it and/or |
| 16 | | - * modify it under the terms of the GNU General Public License |
| 17 | | - * as published by the Free Software Foundation; either version |
| 18 | | - * 2 of the License, or (at your option) any later version. |
| 19 | 15 | */ |
| 20 | 16 | |
| 21 | 17 | #define pr_fmt(fmt) "pseries-hotplug-cpu: " fmt |
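
The `pr_fmt` definition kept at the top of the file (old line 21, new line 17) is what gives every `pr_warn()`/`pr_debug()` call that the later hunks switch to a consistent message prefix. A minimal sketch of the mechanism, outside this patch (the surrounding function is hypothetical):

```c
/* Must be defined before the printk helpers are pulled in (conventionally
 * before any #include in the .c file) so the pr_*() macros route their
 * format string through it.
 */
#define pr_fmt(fmt) "pseries-hotplug-cpu: " fmt

#include <linux/printk.h>

static void pr_fmt_demo(void)
{
	/* emits: "pseries-hotplug-cpu: CPU 3 (hwid 7) didn't die after 120 seconds" */
	pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n", 3, 7);
}
```
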
| old | new | line |
|---|---|---|
| .. | .. | |
| 39 | 35 | #include <asm/topology.h> |
| 40 | 36 | |
| 41 | 37 | #include "pseries.h" |
| 42 | | -#include "offline_states.h" |
| 43 | 38 | |
| 44 | 39 | /* This version can't take the spinlock, because it never returns */ |
| 45 | 40 | static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE; |
| 46 | | - |
| 47 | | -static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) = |
| 48 | | - CPU_STATE_OFFLINE; |
| 49 | | -static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE; |
| 50 | | - |
| 51 | | -static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE; |
| 52 | | - |
| 53 | | -static bool cede_offline_enabled __read_mostly = true; |
| 54 | | - |
| 55 | | -/* |
| 56 | | - * Enable/disable cede_offline when available. |
| 57 | | - */ |
| 58 | | -static int __init setup_cede_offline(char *str) |
| 59 | | -{ |
| 60 | | - return (kstrtobool(str, &cede_offline_enabled) == 0); |
| 61 | | -} |
| 62 | | - |
| 63 | | -__setup("cede_offline=", setup_cede_offline); |
| 64 | | - |
| 65 | | -enum cpu_state_vals get_cpu_current_state(int cpu) |
| 66 | | -{ |
| 67 | | - return per_cpu(current_state, cpu); |
| 68 | | -} |
| 69 | | - |
| 70 | | -void set_cpu_current_state(int cpu, enum cpu_state_vals state) |
| 71 | | -{ |
| 72 | | - per_cpu(current_state, cpu) = state; |
| 73 | | -} |
| 74 | | - |
| 75 | | -enum cpu_state_vals get_preferred_offline_state(int cpu) |
| 76 | | -{ |
| 77 | | - return per_cpu(preferred_offline_state, cpu); |
| 78 | | -} |
| 79 | | - |
| 80 | | -void set_preferred_offline_state(int cpu, enum cpu_state_vals state) |
| 81 | | -{ |
| 82 | | - per_cpu(preferred_offline_state, cpu) = state; |
| 83 | | -} |
| 84 | | - |
| 85 | | -void set_default_offline_state(int cpu) |
| 86 | | -{ |
| 87 | | - per_cpu(preferred_offline_state, cpu) = default_offline_state; |
| 88 | | -} |
| 89 | 41 | |
| 90 | 42 | static void rtas_stop_self(void) |
| 91 | 43 | { |
| .. | .. | |
| 100 | 52 | panic("Alas, I survived.\n"); |
| 101 | 53 | } |
| 102 | 54 | |
| 103 | | -static void pseries_mach_cpu_die(void) |
| | 55 | +static void pseries_cpu_offline_self(void) |
| 104 | 56 | { |
| 105 | | - unsigned int cpu = smp_processor_id(); |
| 106 | 57 | unsigned int hwcpu = hard_smp_processor_id(); |
| 107 | | - u8 cede_latency_hint = 0; |
| 108 | 58 | |
| 109 | 59 | local_irq_disable(); |
| 110 | 60 | idle_task_exit(); |
| .. | .. | |
| 113 | 63 | else |
| 114 | 64 | xics_teardown_cpu(); |
| 115 | 65 | |
| 116 | | - if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { |
| 117 | | - set_cpu_current_state(cpu, CPU_STATE_INACTIVE); |
| 118 | | - if (ppc_md.suspend_disable_cpu) |
| 119 | | - ppc_md.suspend_disable_cpu(); |
| 120 | | - |
| 121 | | - cede_latency_hint = 2; |
| 122 | | - |
| 123 | | - get_lppaca()->idle = 1; |
| 124 | | - if (!lppaca_shared_proc(get_lppaca())) |
| 125 | | - get_lppaca()->donate_dedicated_cpu = 1; |
| 126 | | - |
| 127 | | - while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { |
| 128 | | - while (!prep_irq_for_idle()) { |
| 129 | | - local_irq_enable(); |
| 130 | | - local_irq_disable(); |
| 131 | | - } |
| 132 | | - |
| 133 | | - extended_cede_processor(cede_latency_hint); |
| 134 | | - } |
| 135 | | - |
| 136 | | - local_irq_disable(); |
| 137 | | - |
| 138 | | - if (!lppaca_shared_proc(get_lppaca())) |
| 139 | | - get_lppaca()->donate_dedicated_cpu = 0; |
| 140 | | - get_lppaca()->idle = 0; |
| 141 | | - |
| 142 | | - if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) { |
| 143 | | - unregister_slb_shadow(hwcpu); |
| 144 | | - |
| 145 | | - hard_irq_disable(); |
| 146 | | - /* |
| 147 | | - * Call to start_secondary_resume() will not return. |
| 148 | | - * Kernel stack will be reset and start_secondary() |
| 149 | | - * will be called to continue the online operation. |
| 150 | | - */ |
| 151 | | - start_secondary_resume(); |
| 152 | | - } |
| 153 | | - } |
| 154 | | - |
| 155 | | - /* Requested state is CPU_STATE_OFFLINE at this point */ |
| 156 | | - WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE); |
| 157 | | - |
| 158 | | - set_cpu_current_state(cpu, CPU_STATE_OFFLINE); |
| 159 | 66 | unregister_slb_shadow(hwcpu); |
| 160 | 67 | rtas_stop_self(); |
| 161 | 68 | |
| .. | .. | |
| 180 | 87 | xive_smp_disable_cpu(); |
| 181 | 88 | else |
| 182 | 89 | xics_migrate_irqs_away(); |
| | 90 | + |
| | 91 | + cleanup_cpu_mmu_context(); |
| | 92 | + |
| 183 | 93 | return 0; |
| 184 | 94 | } |
| 185 | 95 | |
| .. | .. | |
| 192 | 102 | * to self-destroy so that the cpu-offline thread can send the CPU_DEAD |
| 193 | 103 | * notifications. |
| 194 | 104 | * |
| 195 | | - * OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to |
| | 105 | + * OTOH, pseries_cpu_offline_self() is called by the @cpu when it wants to |
| 196 | 106 | * self-destruct. |
| 197 | 107 | */ |
| 198 | 108 | static void pseries_cpu_die(unsigned int cpu) |
| 199 | 109 | { |
| 200 | | - int tries; |
| 201 | 110 | int cpu_status = 1; |
| 202 | 111 | unsigned int pcpu = get_hard_smp_processor_id(cpu); |
| | 112 | + unsigned long timeout = jiffies + msecs_to_jiffies(120000); |
| 203 | 113 | |
| 204 | | - if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { |
| 205 | | - cpu_status = 1; |
| 206 | | - for (tries = 0; tries < 5000; tries++) { |
| 207 | | - if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) { |
| 208 | | - cpu_status = 0; |
| 209 | | - break; |
| 210 | | - } |
| 211 | | - msleep(1); |
| 212 | | - } |
| 213 | | - } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) { |
| | 114 | + while (true) { |
| | 115 | + cpu_status = smp_query_cpu_stopped(pcpu); |
| | 116 | + if (cpu_status == QCSS_STOPPED || |
| | 117 | + cpu_status == QCSS_HARDWARE_ERROR) |
| | 118 | + break; |
| 214 | 119 | |
| 215 | | - for (tries = 0; tries < 25; tries++) { |
| 216 | | - cpu_status = smp_query_cpu_stopped(pcpu); |
| 217 | | - if (cpu_status == QCSS_STOPPED || |
| 218 | | - cpu_status == QCSS_HARDWARE_ERROR) |
| 219 | | - break; |
| 220 | | - cpu_relax(); |
| | 120 | + if (time_after(jiffies, timeout)) { |
| | 121 | + pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n", |
| | 122 | + cpu, pcpu); |
| | 123 | + timeout = jiffies + msecs_to_jiffies(120000); |
| 221 | 124 | } |
| | 125 | + |
| | 126 | + cond_resched(); |
| 222 | 127 | } |
| 223 | 128 | |
| 224 | | - if (cpu_status != 0) { |
| 225 | | - printk("Querying DEAD? cpu %i (%i) shows %i\n", |
| 226 | | - cpu, pcpu, cpu_status); |
| | 129 | + if (cpu_status == QCSS_HARDWARE_ERROR) { |
| | 130 | + pr_warn("CPU %i (hwid %i) reported error while dying\n", |
| | 131 | + cpu, pcpu); |
| 227 | 132 | } |
| 228 | 133 | |
| 229 | 134 | /* Isolation and deallocation are definitely done by |
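
The rewritten wait loop in `pseries_cpu_die()` above is the usual jiffies-based polling idiom: poll, warn once the deadline passes, re-arm the deadline so the warning repeats, and `cond_resched()` between iterations. A stand-alone sketch of that idiom, with `hw_thread_stopped()` as a hypothetical stand-in for `smp_query_cpu_stopped()`:

```c
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/sched.h>

static bool hw_thread_stopped(void);	/* hypothetical poll function */

static void wait_until_stopped(void)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(120000);

	while (!hw_thread_stopped()) {
		/* time_after() copes with jiffies wrap-around, unlike a plain '>' */
		if (time_after(jiffies, timeout)) {
			pr_warn("still waiting after 120 seconds\n");
			timeout = jiffies + msecs_to_jiffies(120000);
		}
		cond_resched();	/* let other work run while we keep polling */
	}
}
```
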
| old | new | line |
|---|---|---|
| 284 | 189 | |
| 285 | 190 | if (cpumask_empty(tmp)) { |
| 286 | 191 | printk(KERN_ERR "Unable to find space in cpu_present_mask for" |
| 287 | | - " processor %s with %d thread(s)\n", np->name, |
| | 192 | + " processor %pOFn with %d thread(s)\n", np, |
| 288 | 193 | nthreads); |
| 289 | 194 | goto out_unlock; |
| 290 | 195 | } |
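
This hunk and several below replace `dn->name`/`np->name` in log messages with the `%pOFn` printk specifier. `%pOFn` prints a `struct device_node`'s name and `%pOF` its full path, so the message no longer dereferences the name field directly. Illustrative use only (the message text here is made up):

```c
/* np is a struct device_node *: %pOFn prints the node name, %pOF the full path */
pr_debug("considering CPU node %pOFn (%pOF)\n", np, np);
```
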
| old | new | line |
|---|---|---|
| .. | .. | |
| 339 | 244 | cpu_maps_update_done(); |
| 340 | 245 | } |
| 341 | 246 | |
| | 247 | +static int dlpar_offline_cpu(struct device_node *dn) |
| | 248 | +{ |
| | 249 | + int rc = 0; |
| | 250 | + unsigned int cpu; |
| | 251 | + int len, nthreads, i; |
| | 252 | + const __be32 *intserv; |
| | 253 | + u32 thread; |
| | 254 | + |
| | 255 | + intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); |
| | 256 | + if (!intserv) |
| | 257 | + return -EINVAL; |
| | 258 | + |
| | 259 | + nthreads = len / sizeof(u32); |
| | 260 | + |
| | 261 | + cpu_maps_update_begin(); |
| | 262 | + for (i = 0; i < nthreads; i++) { |
| | 263 | + thread = be32_to_cpu(intserv[i]); |
| | 264 | + for_each_present_cpu(cpu) { |
| | 265 | + if (get_hard_smp_processor_id(cpu) != thread) |
| | 266 | + continue; |
| | 267 | + |
| | 268 | + if (!cpu_online(cpu)) |
| | 269 | + break; |
| | 270 | + |
| | 271 | + cpu_maps_update_done(); |
| | 272 | + rc = device_offline(get_cpu_device(cpu)); |
| | 273 | + if (rc) |
| | 274 | + goto out; |
| | 275 | + cpu_maps_update_begin(); |
| | 276 | + break; |
| | 277 | + } |
| | 278 | + if (cpu == num_possible_cpus()) { |
| | 279 | + pr_warn("Could not find cpu to offline with physical id 0x%x\n", |
| | 280 | + thread); |
| | 281 | + } |
| | 282 | + } |
| | 283 | + cpu_maps_update_done(); |
| | 284 | + |
| | 285 | +out: |
| | 286 | + return rc; |
| | 287 | +} |
| | 288 | + |
| 342 | 289 | static int dlpar_online_cpu(struct device_node *dn) |
| 343 | 290 | { |
| 344 | 291 | int rc = 0; |
| .. | .. | |
| 359 | 306 | for_each_present_cpu(cpu) { |
| 360 | 307 | if (get_hard_smp_processor_id(cpu) != thread) |
| 361 | 308 | continue; |
| 362 | | - BUG_ON(get_cpu_current_state(cpu) |
| 363 | | - != CPU_STATE_OFFLINE); |
| 364 | 309 | cpu_maps_update_done(); |
| 365 | | - timed_topology_update(1); |
| 366 | 310 | find_and_online_cpu_nid(cpu); |
| 367 | 311 | rc = device_online(get_cpu_device(cpu)); |
| 368 | | - if (rc) |
| | 312 | + if (rc) { |
| | 313 | + dlpar_offline_cpu(dn); |
| 369 | 314 | goto out; |
| | 315 | + } |
| 370 | 316 | cpu_maps_update_begin(); |
| 371 | 317 | |
| 372 | 318 | break; |
| .. | .. | |
| 408 | 354 | return found; |
| 409 | 355 | } |
| 410 | 356 | |
| | 357 | +static bool drc_info_valid_index(struct device_node *parent, u32 drc_index) |
| | 358 | +{ |
| | 359 | + struct property *info; |
| | 360 | + struct of_drc_info drc; |
| | 361 | + const __be32 *value; |
| | 362 | + u32 index; |
| | 363 | + int count, i, j; |
| | 364 | + |
| | 365 | + info = of_find_property(parent, "ibm,drc-info", NULL); |
| | 366 | + if (!info) |
| | 367 | + return false; |
| | 368 | + |
| | 369 | + value = of_prop_next_u32(info, NULL, &count); |
| | 370 | + |
| | 371 | + /* First value of ibm,drc-info is number of drc-info records */ |
| | 372 | + if (value) |
| | 373 | + value++; |
| | 374 | + else |
| | 375 | + return false; |
| | 376 | + |
| | 377 | + for (i = 0; i < count; i++) { |
| | 378 | + if (of_read_drc_info_cell(&info, &value, &drc)) |
| | 379 | + return false; |
| | 380 | + |
| | 381 | + if (strncmp(drc.drc_type, "CPU", 3)) |
| | 382 | + break; |
| | 383 | + |
| | 384 | + if (drc_index > drc.last_drc_index) |
| | 385 | + continue; |
| | 386 | + |
| | 387 | + index = drc.drc_index_start; |
| | 388 | + for (j = 0; j < drc.num_sequential_elems; j++) { |
| | 389 | + if (drc_index == index) |
| | 390 | + return true; |
| | 391 | + |
| | 392 | + index += drc.sequential_inc; |
| | 393 | + } |
| | 394 | + } |
| | 395 | + |
| | 396 | + return false; |
| | 397 | +} |
| | 398 | + |
| 411 | 399 | static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index) |
| 412 | 400 | { |
| 413 | 401 | bool found = false; |
| 414 | 402 | int rc, index; |
| 415 | 403 | |
| 416 | | - index = 0; |
| | 404 | + if (of_find_property(parent, "ibm,drc-info", NULL)) |
| | 405 | + return drc_info_valid_index(parent, drc_index); |
| | 406 | + |
| | 407 | + /* Note that the format of the ibm,drc-indexes array is |
| | 408 | + * the number of entries in the array followed by the array |
| | 409 | + * of drc values so we start looking at index = 1. |
| | 410 | + */ |
| | 411 | + index = 1; |
| 417 | 412 | while (!found) { |
| 418 | 413 | u32 drc; |
| 419 | 414 | |
| 420 | 415 | rc = of_property_read_u32_index(parent, "ibm,drc-indexes", |
| 421 | 416 | index++, &drc); |
| | 417 | + |
| 422 | 418 | if (rc) |
| 423 | 419 | break; |
| 424 | 420 | |
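
`drc_info_valid_index()` above and `find_drc_info_cpus_to_add()` further down both walk the same layout: `ibm,drc-info` starts with a record count, and each record read by `of_read_drc_info_cell()` describes a run of sequential DRC indexes. A hedged sketch of the index arithmetic, using only fields that appear in the hunks above and made-up numbers for illustration:

```c
/* Example record (values invented for illustration):
 *   drc_index_start      = 0x10000000
 *   num_sequential_elems = 8
 *   sequential_inc       = 1
 * covers DRC indexes 0x10000000 .. 0x10000007 inclusive.
 */
static bool index_in_drc_record(const struct of_drc_info *drc, u32 drc_index)
{
	u32 index = drc->drc_index_start;
	int j;

	for (j = 0; j < drc->num_sequential_elems; j++) {
		if (drc_index == index)
			return true;
		index += drc->sequential_inc;
	}
	return false;
}
```
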
| old | new | line |
|---|---|---|
| .. | .. | |
| 478 | 474 | |
| 479 | 475 | if (rc) { |
| 480 | 476 | saved_rc = rc; |
| 481 | | - pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n", |
| 482 | | - dn->name, rc, drc_index); |
| | 477 | + pr_warn("Failed to attach node %pOFn, rc: %d, drc index: %x\n", |
| | 478 | + dn, rc, drc_index); |
| 483 | 479 | |
| 484 | 480 | rc = dlpar_release_drc(drc_index); |
| 485 | 481 | if (!rc) |
| .. | .. | |
| 491 | 487 | rc = dlpar_online_cpu(dn); |
| 492 | 488 | if (rc) { |
| 493 | 489 | saved_rc = rc; |
| 494 | | - pr_warn("Failed to online cpu %s, rc: %d, drc index: %x\n", |
| 495 | | - dn->name, rc, drc_index); |
| | 490 | + pr_warn("Failed to online cpu %pOFn, rc: %d, drc index: %x\n", |
| | 491 | + dn, rc, drc_index); |
| 496 | 492 | |
| 497 | 493 | rc = dlpar_detach_node(dn); |
| 498 | 494 | if (!rc) |
| .. | .. | |
| 501 | 497 | return saved_rc; |
| 502 | 498 | } |
| 503 | 499 | |
| 504 | | - pr_debug("Successfully added CPU %s, drc index: %x\n", dn->name, |
| | 500 | + pr_debug("Successfully added CPU %pOFn, drc index: %x\n", dn, |
| 505 | 501 | drc_index); |
| 506 | 502 | return rc; |
| 507 | | -} |
| 508 | | - |
| 509 | | -static int dlpar_offline_cpu(struct device_node *dn) |
| 510 | | -{ |
| 511 | | - int rc = 0; |
| 512 | | - unsigned int cpu; |
| 513 | | - int len, nthreads, i; |
| 514 | | - const __be32 *intserv; |
| 515 | | - u32 thread; |
| 516 | | - |
| 517 | | - intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); |
| 518 | | - if (!intserv) |
| 519 | | - return -EINVAL; |
| 520 | | - |
| 521 | | - nthreads = len / sizeof(u32); |
| 522 | | - |
| 523 | | - cpu_maps_update_begin(); |
| 524 | | - for (i = 0; i < nthreads; i++) { |
| 525 | | - thread = be32_to_cpu(intserv[i]); |
| 526 | | - for_each_present_cpu(cpu) { |
| 527 | | - if (get_hard_smp_processor_id(cpu) != thread) |
| 528 | | - continue; |
| 529 | | - |
| 530 | | - if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE) |
| 531 | | - break; |
| 532 | | - |
| 533 | | - if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) { |
| 534 | | - set_preferred_offline_state(cpu, |
| 535 | | - CPU_STATE_OFFLINE); |
| 536 | | - cpu_maps_update_done(); |
| 537 | | - timed_topology_update(1); |
| 538 | | - rc = device_offline(get_cpu_device(cpu)); |
| 539 | | - if (rc) |
| 540 | | - goto out; |
| 541 | | - cpu_maps_update_begin(); |
| 542 | | - break; |
| 543 | | - |
| 544 | | - } |
| 545 | | - |
| 546 | | - /* |
| 547 | | - * The cpu is in CPU_STATE_INACTIVE. |
| 548 | | - * Upgrade it's state to CPU_STATE_OFFLINE. |
| 549 | | - */ |
| 550 | | - set_preferred_offline_state(cpu, CPU_STATE_OFFLINE); |
| 551 | | - BUG_ON(plpar_hcall_norets(H_PROD, thread) |
| 552 | | - != H_SUCCESS); |
| 553 | | - __cpu_die(cpu); |
| 554 | | - break; |
| 555 | | - } |
| 556 | | - if (cpu == num_possible_cpus()) |
| 557 | | - printk(KERN_WARNING "Could not find cpu to offline with physical id 0x%x\n", thread); |
| 558 | | - } |
| 559 | | - cpu_maps_update_done(); |
| 560 | | - |
| 561 | | -out: |
| 562 | | - return rc; |
| 563 | | - |
| 564 | 503 | } |
| 565 | 504 | |
| 566 | 505 | static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index) |
| 567 | 506 | { |
| 568 | 507 | int rc; |
| 569 | 508 | |
| 570 | | - pr_debug("Attempting to remove CPU %s, drc index: %x\n", |
| 571 | | - dn->name, drc_index); |
| | 509 | + pr_debug("Attempting to remove CPU %pOFn, drc index: %x\n", |
| | 510 | + dn, drc_index); |
| 572 | 511 | |
| 573 | 512 | rc = dlpar_offline_cpu(dn); |
| 574 | 513 | if (rc) { |
| 575 | | - pr_warn("Failed to offline CPU %s, rc: %d\n", dn->name, rc); |
| | 514 | + pr_warn("Failed to offline CPU %pOFn, rc: %d\n", dn, rc); |
| 576 | 515 | return -EINVAL; |
| 577 | 516 | } |
| 578 | 517 | |
| 579 | 518 | rc = dlpar_release_drc(drc_index); |
| 580 | 519 | if (rc) { |
| 581 | | - pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n", |
| 582 | | - drc_index, dn->name, rc); |
| | 520 | + pr_warn("Failed to release drc (%x) for CPU %pOFn, rc: %d\n", |
| | 521 | + drc_index, dn, rc); |
| 583 | 522 | dlpar_online_cpu(dn); |
| 584 | 523 | return rc; |
| 585 | 524 | } |
| .. | .. | |
| 588 | 527 | if (rc) { |
| 589 | 528 | int saved_rc = rc; |
| 590 | 529 | |
| 591 | | - pr_warn("Failed to detach CPU %s, rc: %d", dn->name, rc); |
| | 530 | + pr_warn("Failed to detach CPU %pOFn, rc: %d", dn, rc); |
| 592 | 531 | |
| 593 | 532 | rc = dlpar_acquire_drc(drc_index); |
| 594 | 533 | if (!rc) |
| .. | .. | |
| 659 | 598 | rc = of_property_read_u32(dn, "ibm,my-drc-index", |
| 660 | 599 | &cpu_drcs[cpus_found - 1]); |
| 661 | 600 | if (rc) { |
| 662 | | - pr_warn("Error occurred getting drc-index for %s\n", |
| 663 | | - dn->name); |
| | 601 | + pr_warn("Error occurred getting drc-index for %pOFn\n", |
| | 602 | + dn); |
| 664 | 603 | of_node_put(dn); |
| 665 | 604 | return -1; |
| 666 | 605 | } |
| .. | .. | |
| 718 | 657 | return rc; |
| 719 | 658 | } |
| 720 | 659 | |
| 721 | | -static int find_dlpar_cpus_to_add(u32 *cpu_drcs, u32 cpus_to_add) |
| | 660 | +static int find_drc_info_cpus_to_add(struct device_node *cpus, |
| | 661 | + struct property *info, |
| | 662 | + u32 *cpu_drcs, u32 cpus_to_add) |
| 722 | 663 | { |
| 723 | | - struct device_node *parent; |
| | 664 | + struct of_drc_info drc; |
| | 665 | + const __be32 *value; |
| | 666 | + u32 count, drc_index; |
| | 667 | + int cpus_found = 0; |
| | 668 | + int i, j; |
| | 669 | + |
| | 670 | + if (!info) |
| | 671 | + return -1; |
| | 672 | + |
| | 673 | + value = of_prop_next_u32(info, NULL, &count); |
| | 674 | + if (value) |
| | 675 | + value++; |
| | 676 | + |
| | 677 | + for (i = 0; i < count; i++) { |
| | 678 | + of_read_drc_info_cell(&info, &value, &drc); |
| | 679 | + if (strncmp(drc.drc_type, "CPU", 3)) |
| | 680 | + break; |
| | 681 | + |
| | 682 | + drc_index = drc.drc_index_start; |
| | 683 | + for (j = 0; j < drc.num_sequential_elems; j++) { |
| | 684 | + if (dlpar_cpu_exists(cpus, drc_index)) |
| | 685 | + continue; |
| | 686 | + |
| | 687 | + cpu_drcs[cpus_found++] = drc_index; |
| | 688 | + |
| | 689 | + if (cpus_found == cpus_to_add) |
| | 690 | + return cpus_found; |
| | 691 | + |
| | 692 | + drc_index += drc.sequential_inc; |
| | 693 | + } |
| | 694 | + } |
| | 695 | + |
| | 696 | + return cpus_found; |
| | 697 | +} |
| | 698 | + |
| | 699 | +static int find_drc_index_cpus_to_add(struct device_node *cpus, |
| | 700 | + u32 *cpu_drcs, u32 cpus_to_add) |
| | 701 | +{ |
| 724 | 702 | int cpus_found = 0; |
| 725 | 703 | int index, rc; |
| 726 | | - |
| 727 | | - parent = of_find_node_by_path("/cpus"); |
| 728 | | - if (!parent) { |
| 729 | | - pr_warn("Could not find CPU root node in device tree\n"); |
| 730 | | - kfree(cpu_drcs); |
| 731 | | - return -1; |
| 732 | | - } |
| | 704 | + u32 drc_index; |
| 733 | 705 | |
| 734 | 706 | /* Search the ibm,drc-indexes array for possible CPU drcs to |
| 735 | 707 | * add. Note that the format of the ibm,drc-indexes array is |
| .. | .. | |
| 738 | 710 | */ |
| 739 | 711 | index = 1; |
| 740 | 712 | while (cpus_found < cpus_to_add) { |
| 741 | | - u32 drc; |
| | 713 | + rc = of_property_read_u32_index(cpus, "ibm,drc-indexes", |
| | 714 | + index++, &drc_index); |
| 742 | 715 | |
| 743 | | - rc = of_property_read_u32_index(parent, "ibm,drc-indexes", |
| 744 | | - index++, &drc); |
| 745 | 716 | if (rc) |
| 746 | 717 | break; |
| 747 | 718 | |
| 748 | | - if (dlpar_cpu_exists(parent, drc)) |
| | 719 | + if (dlpar_cpu_exists(cpus, drc_index)) |
| 749 | 720 | continue; |
| 750 | 721 | |
| 751 | | - cpu_drcs[cpus_found++] = drc; |
| | 722 | + cpu_drcs[cpus_found++] = drc_index; |
| 752 | 723 | } |
| 753 | 724 | |
| 754 | | - of_node_put(parent); |
| 755 | 725 | return cpus_found; |
| 756 | 726 | } |
| 757 | 727 | |
| 758 | 728 | static int dlpar_cpu_add_by_count(u32 cpus_to_add) |
| 759 | 729 | { |
| | 730 | + struct device_node *parent; |
| | 731 | + struct property *info; |
| 760 | 732 | u32 *cpu_drcs; |
| 761 | 733 | int cpus_added = 0; |
| 762 | 734 | int cpus_found; |
| .. | .. | |
| 768 | 740 | if (!cpu_drcs) |
| 769 | 741 | return -EINVAL; |
| 770 | 742 | |
| 771 | | - cpus_found = find_dlpar_cpus_to_add(cpu_drcs, cpus_to_add); |
| | 743 | + parent = of_find_node_by_path("/cpus"); |
| | 744 | + if (!parent) { |
| | 745 | + pr_warn("Could not find CPU root node in device tree\n"); |
| | 746 | + kfree(cpu_drcs); |
| | 747 | + return -1; |
| | 748 | + } |
| | 749 | + |
| | 750 | + info = of_find_property(parent, "ibm,drc-info", NULL); |
| | 751 | + if (info) |
| | 752 | + cpus_found = find_drc_info_cpus_to_add(parent, info, cpu_drcs, cpus_to_add); |
| | 753 | + else |
| | 754 | + cpus_found = find_drc_index_cpus_to_add(parent, cpu_drcs, cpus_to_add); |
| | 755 | + |
| | 756 | + of_node_put(parent); |
| | 757 | + |
| 772 | 758 | if (cpus_found < cpus_to_add) { |
| 773 | 759 | pr_warn("Failed to find enough CPUs (%d of %d) to add\n", |
| 774 | 760 | cpus_found, cpus_to_add); |
| .. | .. | |
| 796 | 782 | } |
| 797 | 783 | |
| 798 | 784 | kfree(cpu_drcs); |
| 799 | | - return rc; |
| 800 | | -} |
| 801 | | - |
| 802 | | -int dlpar_cpu_readd(int cpu) |
| 803 | | -{ |
| 804 | | - struct device_node *dn; |
| 805 | | - struct device *dev; |
| 806 | | - u32 drc_index; |
| 807 | | - int rc; |
| 808 | | - |
| 809 | | - dev = get_cpu_device(cpu); |
| 810 | | - dn = dev->of_node; |
| 811 | | - |
| 812 | | - rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index); |
| 813 | | - |
| 814 | | - rc = dlpar_cpu_remove_by_index(drc_index); |
| 815 | | - if (!rc) |
| 816 | | - rc = dlpar_cpu_add(drc_index); |
| 817 | | - |
| 818 | 785 | return rc; |
| 819 | 786 | } |
| 820 | 787 | |
| .. | .. | |
| 916 | 883 | .notifier_call = pseries_smp_notifier, |
| 917 | 884 | }; |
| 918 | 885 | |
| 919 | | -#define MAX_CEDE_LATENCY_LEVELS 4 |
| 920 | | -#define CEDE_LATENCY_PARAM_LENGTH 10 |
| 921 | | -#define CEDE_LATENCY_PARAM_MAX_LENGTH \ |
| 922 | | - (MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char)) |
| 923 | | -#define CEDE_LATENCY_TOKEN 45 |
| 924 | | - |
| 925 | | -static char cede_parameters[CEDE_LATENCY_PARAM_MAX_LENGTH]; |
| 926 | | - |
| 927 | | -static int parse_cede_parameters(void) |
| 928 | | -{ |
| 929 | | - memset(cede_parameters, 0, CEDE_LATENCY_PARAM_MAX_LENGTH); |
| 930 | | - return rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, |
| 931 | | - NULL, |
| 932 | | - CEDE_LATENCY_TOKEN, |
| 933 | | - __pa(cede_parameters), |
| 934 | | - CEDE_LATENCY_PARAM_MAX_LENGTH); |
| 935 | | -} |
| 936 | | - |
| 937 | 886 | static int __init pseries_cpu_hotplug_init(void) |
| 938 | 887 | { |
| 939 | | - int cpu; |
| 940 | 888 | int qcss_tok; |
| 941 | 889 | |
| 942 | 890 | #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE |
| .. | .. | |
| 954 | 902 | return 0; |
| 955 | 903 | } |
| 956 | 904 | |
| 957 | | - ppc_md.cpu_die = pseries_mach_cpu_die; |
| | 905 | + smp_ops->cpu_offline_self = pseries_cpu_offline_self; |
| 958 | 906 | smp_ops->cpu_disable = pseries_cpu_disable; |
| 959 | 907 | smp_ops->cpu_die = pseries_cpu_die; |
| 960 | 908 | |
| 961 | 909 | /* Processors can be added/removed only on LPAR */ |
| 962 | | - if (firmware_has_feature(FW_FEATURE_LPAR)) { |
| | 910 | + if (firmware_has_feature(FW_FEATURE_LPAR)) |
| 963 | 911 | of_reconfig_notifier_register(&pseries_smp_nb); |
| 964 | | - cpu_maps_update_begin(); |
| 965 | | - if (cede_offline_enabled && parse_cede_parameters() == 0) { |
| 966 | | - default_offline_state = CPU_STATE_INACTIVE; |
| 967 | | - for_each_online_cpu(cpu) |
| 968 | | - set_default_offline_state(cpu); |
| 969 | | - } |
| 970 | | - cpu_maps_update_done(); |
| 971 | | - } |
| 972 | 912 | |
| 973 | 913 | return 0; |
| 974 | 914 | } |
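
For orientation, the three hooks wired up in `pseries_cpu_hotplug_init()` split the offline path along the lines the comments earlier in the file describe; the sketch below is only a summary of that division of labour, not code from the patch:

```c
/* Division of labour between the hooks (as described by the comments above):
 *  - cpu_disable (pseries_cpu_disable): runs early in takedown and migrates
 *    interrupts away from the CPU being offlined;
 *  - cpu_offline_self (pseries_cpu_offline_self): runs on the dying CPU itself
 *    and never returns, ending in rtas_stop_self();
 *  - cpu_die (pseries_cpu_die): runs from the cpu-offline (hotplug) thread and
 *    waits for the RTAS "stopped" state before CPU_DEAD notifications go out.
 */
smp_ops->cpu_offline_self = pseries_cpu_offline_self;
smp_ops->cpu_disable      = pseries_cpu_disable;
smp_ops->cpu_die          = pseries_cpu_die;
```
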