From 08f87f769b595151be1afeff53e144f543faa614 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 06 Dec 2023 09:51:13 +0000
Subject: [PATCH] powerpc/pseries/hotplug-cpu: remove cede offline state and add ibm,drc-info support

---
 kernel/arch/powerpc/platforms/pseries/hotplug-cpu.c | 452 ++++++++++++++++++++++++--------------------------------
 1 file changed, 196 insertions(+), 256 deletions(-)

diff --git a/kernel/arch/powerpc/platforms/pseries/hotplug-cpu.c b/kernel/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 8bfb97d..325f3b2 100644
--- a/kernel/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/kernel/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * pseries CPU Hotplug infrastructure.
  *
@@ -11,11 +12,6 @@
  * Plus various changes from other IBM teams...
  *
  * Copyright (C) 2006 Michael Ellerman, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #define pr_fmt(fmt) "pseries-hotplug-cpu: " fmt
@@ -39,53 +35,9 @@
 #include <asm/topology.h>
 
 #include "pseries.h"
-#include "offline_states.h"
 
 /* This version can't take the spinlock, because it never returns */
 static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
-
-static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
-							CPU_STATE_OFFLINE;
-static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE;
-
-static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE;
-
-static bool cede_offline_enabled __read_mostly = true;
-
-/*
- * Enable/disable cede_offline when available.
- */
-static int __init setup_cede_offline(char *str)
-{
-	return (kstrtobool(str, &cede_offline_enabled) == 0);
-}
-
-__setup("cede_offline=", setup_cede_offline);
-
-enum cpu_state_vals get_cpu_current_state(int cpu)
-{
-	return per_cpu(current_state, cpu);
-}
-
-void set_cpu_current_state(int cpu, enum cpu_state_vals state)
-{
-	per_cpu(current_state, cpu) = state;
-}
-
-enum cpu_state_vals get_preferred_offline_state(int cpu)
-{
-	return per_cpu(preferred_offline_state, cpu);
-}
-
-void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
-{
-	per_cpu(preferred_offline_state, cpu) = state;
-}
-
-void set_default_offline_state(int cpu)
-{
-	per_cpu(preferred_offline_state, cpu) = default_offline_state;
-}
 
 static void rtas_stop_self(void)
 {
@@ -100,11 +52,9 @@
 	panic("Alas, I survived.\n");
 }
 
-static void pseries_mach_cpu_die(void)
+static void pseries_cpu_offline_self(void)
 {
-	unsigned int cpu = smp_processor_id();
 	unsigned int hwcpu = hard_smp_processor_id();
-	u8 cede_latency_hint = 0;
 
 	local_irq_disable();
 	idle_task_exit();
@@ -113,49 +63,6 @@
 	else
 		xics_teardown_cpu();
 
-	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
-		set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
-		if (ppc_md.suspend_disable_cpu)
-			ppc_md.suspend_disable_cpu();
-
-		cede_latency_hint = 2;
-
-		get_lppaca()->idle = 1;
-		if (!lppaca_shared_proc(get_lppaca()))
-			get_lppaca()->donate_dedicated_cpu = 1;
-
-		while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
-			while (!prep_irq_for_idle()) {
-				local_irq_enable();
-				local_irq_disable();
-			}
-
-			extended_cede_processor(cede_latency_hint);
-		}
-
-		local_irq_disable();
-
-		if (!lppaca_shared_proc(get_lppaca()))
-			get_lppaca()->donate_dedicated_cpu = 0;
-		get_lppaca()->idle = 0;
-
-		if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
unregister_slb_shadow(hwcpu); - - hard_irq_disable(); - /* - * Call to start_secondary_resume() will not return. - * Kernel stack will be reset and start_secondary() - * will be called to continue the online operation. - */ - start_secondary_resume(); - } - } - - /* Requested state is CPU_STATE_OFFLINE at this point */ - WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE); - - set_cpu_current_state(cpu, CPU_STATE_OFFLINE); unregister_slb_shadow(hwcpu); rtas_stop_self(); @@ -180,6 +87,9 @@ xive_smp_disable_cpu(); else xics_migrate_irqs_away(); + + cleanup_cpu_mmu_context(); + return 0; } @@ -192,38 +102,33 @@ * to self-destroy so that the cpu-offline thread can send the CPU_DEAD * notifications. * - * OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to + * OTOH, pseries_cpu_offline_self() is called by the @cpu when it wants to * self-destruct. */ static void pseries_cpu_die(unsigned int cpu) { - int tries; int cpu_status = 1; unsigned int pcpu = get_hard_smp_processor_id(cpu); + unsigned long timeout = jiffies + msecs_to_jiffies(120000); - if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { - cpu_status = 1; - for (tries = 0; tries < 5000; tries++) { - if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) { - cpu_status = 0; - break; - } - msleep(1); - } - } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) { + while (true) { + cpu_status = smp_query_cpu_stopped(pcpu); + if (cpu_status == QCSS_STOPPED || + cpu_status == QCSS_HARDWARE_ERROR) + break; - for (tries = 0; tries < 25; tries++) { - cpu_status = smp_query_cpu_stopped(pcpu); - if (cpu_status == QCSS_STOPPED || - cpu_status == QCSS_HARDWARE_ERROR) - break; - cpu_relax(); + if (time_after(jiffies, timeout)) { + pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n", + cpu, pcpu); + timeout = jiffies + msecs_to_jiffies(120000); } + + cond_resched(); } - if (cpu_status != 0) { - printk("Querying DEAD? 
cpu %i (%i) shows %i\n", - cpu, pcpu, cpu_status); + if (cpu_status == QCSS_HARDWARE_ERROR) { + pr_warn("CPU %i (hwid %i) reported error while dying\n", + cpu, pcpu); } /* Isolation and deallocation are definitely done by @@ -284,7 +189,7 @@ if (cpumask_empty(tmp)) { printk(KERN_ERR "Unable to find space in cpu_present_mask for" - " processor %s with %d thread(s)\n", np->name, + " processor %pOFn with %d thread(s)\n", np, nthreads); goto out_unlock; } @@ -339,6 +244,48 @@ cpu_maps_update_done(); } +static int dlpar_offline_cpu(struct device_node *dn) +{ + int rc = 0; + unsigned int cpu; + int len, nthreads, i; + const __be32 *intserv; + u32 thread; + + intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); + if (!intserv) + return -EINVAL; + + nthreads = len / sizeof(u32); + + cpu_maps_update_begin(); + for (i = 0; i < nthreads; i++) { + thread = be32_to_cpu(intserv[i]); + for_each_present_cpu(cpu) { + if (get_hard_smp_processor_id(cpu) != thread) + continue; + + if (!cpu_online(cpu)) + break; + + cpu_maps_update_done(); + rc = device_offline(get_cpu_device(cpu)); + if (rc) + goto out; + cpu_maps_update_begin(); + break; + } + if (cpu == num_possible_cpus()) { + pr_warn("Could not find cpu to offline with physical id 0x%x\n", + thread); + } + } + cpu_maps_update_done(); + +out: + return rc; +} + static int dlpar_online_cpu(struct device_node *dn) { int rc = 0; @@ -359,14 +306,13 @@ for_each_present_cpu(cpu) { if (get_hard_smp_processor_id(cpu) != thread) continue; - BUG_ON(get_cpu_current_state(cpu) - != CPU_STATE_OFFLINE); cpu_maps_update_done(); - timed_topology_update(1); find_and_online_cpu_nid(cpu); rc = device_online(get_cpu_device(cpu)); - if (rc) + if (rc) { + dlpar_offline_cpu(dn); goto out; + } cpu_maps_update_begin(); break; @@ -408,17 +354,67 @@ return found; } +static bool drc_info_valid_index(struct device_node *parent, u32 drc_index) +{ + struct property *info; + struct of_drc_info drc; + const __be32 *value; + u32 index; + int count, i, j; + + info = of_find_property(parent, "ibm,drc-info", NULL); + if (!info) + return false; + + value = of_prop_next_u32(info, NULL, &count); + + /* First value of ibm,drc-info is number of drc-info records */ + if (value) + value++; + else + return false; + + for (i = 0; i < count; i++) { + if (of_read_drc_info_cell(&info, &value, &drc)) + return false; + + if (strncmp(drc.drc_type, "CPU", 3)) + break; + + if (drc_index > drc.last_drc_index) + continue; + + index = drc.drc_index_start; + for (j = 0; j < drc.num_sequential_elems; j++) { + if (drc_index == index) + return true; + + index += drc.sequential_inc; + } + } + + return false; +} + static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index) { bool found = false; int rc, index; - index = 0; + if (of_find_property(parent, "ibm,drc-info", NULL)) + return drc_info_valid_index(parent, drc_index); + + /* Note that the format of the ibm,drc-indexes array is + * the number of entries in the array followed by the array + * of drc values so we start looking at index = 1. 
+ */ + index = 1; while (!found) { u32 drc; rc = of_property_read_u32_index(parent, "ibm,drc-indexes", index++, &drc); + if (rc) break; @@ -478,8 +474,8 @@ if (rc) { saved_rc = rc; - pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n", - dn->name, rc, drc_index); + pr_warn("Failed to attach node %pOFn, rc: %d, drc index: %x\n", + dn, rc, drc_index); rc = dlpar_release_drc(drc_index); if (!rc) @@ -491,8 +487,8 @@ rc = dlpar_online_cpu(dn); if (rc) { saved_rc = rc; - pr_warn("Failed to online cpu %s, rc: %d, drc index: %x\n", - dn->name, rc, drc_index); + pr_warn("Failed to online cpu %pOFn, rc: %d, drc index: %x\n", + dn, rc, drc_index); rc = dlpar_detach_node(dn); if (!rc) @@ -501,85 +497,28 @@ return saved_rc; } - pr_debug("Successfully added CPU %s, drc index: %x\n", dn->name, + pr_debug("Successfully added CPU %pOFn, drc index: %x\n", dn, drc_index); return rc; -} - -static int dlpar_offline_cpu(struct device_node *dn) -{ - int rc = 0; - unsigned int cpu; - int len, nthreads, i; - const __be32 *intserv; - u32 thread; - - intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); - if (!intserv) - return -EINVAL; - - nthreads = len / sizeof(u32); - - cpu_maps_update_begin(); - for (i = 0; i < nthreads; i++) { - thread = be32_to_cpu(intserv[i]); - for_each_present_cpu(cpu) { - if (get_hard_smp_processor_id(cpu) != thread) - continue; - - if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE) - break; - - if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) { - set_preferred_offline_state(cpu, - CPU_STATE_OFFLINE); - cpu_maps_update_done(); - timed_topology_update(1); - rc = device_offline(get_cpu_device(cpu)); - if (rc) - goto out; - cpu_maps_update_begin(); - break; - - } - - /* - * The cpu is in CPU_STATE_INACTIVE. - * Upgrade it's state to CPU_STATE_OFFLINE. 
- */ - set_preferred_offline_state(cpu, CPU_STATE_OFFLINE); - BUG_ON(plpar_hcall_norets(H_PROD, thread) - != H_SUCCESS); - __cpu_die(cpu); - break; - } - if (cpu == num_possible_cpus()) - printk(KERN_WARNING "Could not find cpu to offline with physical id 0x%x\n", thread); - } - cpu_maps_update_done(); - -out: - return rc; - } static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index) { int rc; - pr_debug("Attempting to remove CPU %s, drc index: %x\n", - dn->name, drc_index); + pr_debug("Attempting to remove CPU %pOFn, drc index: %x\n", + dn, drc_index); rc = dlpar_offline_cpu(dn); if (rc) { - pr_warn("Failed to offline CPU %s, rc: %d\n", dn->name, rc); + pr_warn("Failed to offline CPU %pOFn, rc: %d\n", dn, rc); return -EINVAL; } rc = dlpar_release_drc(drc_index); if (rc) { - pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n", - drc_index, dn->name, rc); + pr_warn("Failed to release drc (%x) for CPU %pOFn, rc: %d\n", + drc_index, dn, rc); dlpar_online_cpu(dn); return rc; } @@ -588,7 +527,7 @@ if (rc) { int saved_rc = rc; - pr_warn("Failed to detach CPU %s, rc: %d", dn->name, rc); + pr_warn("Failed to detach CPU %pOFn, rc: %d", dn, rc); rc = dlpar_acquire_drc(drc_index); if (!rc) @@ -659,8 +598,8 @@ rc = of_property_read_u32(dn, "ibm,my-drc-index", &cpu_drcs[cpus_found - 1]); if (rc) { - pr_warn("Error occurred getting drc-index for %s\n", - dn->name); + pr_warn("Error occurred getting drc-index for %pOFn\n", + dn); of_node_put(dn); return -1; } @@ -718,18 +657,51 @@ return rc; } -static int find_dlpar_cpus_to_add(u32 *cpu_drcs, u32 cpus_to_add) +static int find_drc_info_cpus_to_add(struct device_node *cpus, + struct property *info, + u32 *cpu_drcs, u32 cpus_to_add) { - struct device_node *parent; + struct of_drc_info drc; + const __be32 *value; + u32 count, drc_index; + int cpus_found = 0; + int i, j; + + if (!info) + return -1; + + value = of_prop_next_u32(info, NULL, &count); + if (value) + value++; + + for (i = 0; i < count; i++) { + of_read_drc_info_cell(&info, &value, &drc); + if (strncmp(drc.drc_type, "CPU", 3)) + break; + + drc_index = drc.drc_index_start; + for (j = 0; j < drc.num_sequential_elems; j++) { + if (dlpar_cpu_exists(cpus, drc_index)) + continue; + + cpu_drcs[cpus_found++] = drc_index; + + if (cpus_found == cpus_to_add) + return cpus_found; + + drc_index += drc.sequential_inc; + } + } + + return cpus_found; +} + +static int find_drc_index_cpus_to_add(struct device_node *cpus, + u32 *cpu_drcs, u32 cpus_to_add) +{ int cpus_found = 0; int index, rc; - - parent = of_find_node_by_path("/cpus"); - if (!parent) { - pr_warn("Could not find CPU root node in device tree\n"); - kfree(cpu_drcs); - return -1; - } + u32 drc_index; /* Search the ibm,drc-indexes array for possible CPU drcs to * add. 
Note that the format of the ibm,drc-indexes array is @@ -738,25 +710,25 @@ */ index = 1; while (cpus_found < cpus_to_add) { - u32 drc; + rc = of_property_read_u32_index(cpus, "ibm,drc-indexes", + index++, &drc_index); - rc = of_property_read_u32_index(parent, "ibm,drc-indexes", - index++, &drc); if (rc) break; - if (dlpar_cpu_exists(parent, drc)) + if (dlpar_cpu_exists(cpus, drc_index)) continue; - cpu_drcs[cpus_found++] = drc; + cpu_drcs[cpus_found++] = drc_index; } - of_node_put(parent); return cpus_found; } static int dlpar_cpu_add_by_count(u32 cpus_to_add) { + struct device_node *parent; + struct property *info; u32 *cpu_drcs; int cpus_added = 0; int cpus_found; @@ -768,7 +740,21 @@ if (!cpu_drcs) return -EINVAL; - cpus_found = find_dlpar_cpus_to_add(cpu_drcs, cpus_to_add); + parent = of_find_node_by_path("/cpus"); + if (!parent) { + pr_warn("Could not find CPU root node in device tree\n"); + kfree(cpu_drcs); + return -1; + } + + info = of_find_property(parent, "ibm,drc-info", NULL); + if (info) + cpus_found = find_drc_info_cpus_to_add(parent, info, cpu_drcs, cpus_to_add); + else + cpus_found = find_drc_index_cpus_to_add(parent, cpu_drcs, cpus_to_add); + + of_node_put(parent); + if (cpus_found < cpus_to_add) { pr_warn("Failed to find enough CPUs (%d of %d) to add\n", cpus_found, cpus_to_add); @@ -796,25 +782,6 @@ } kfree(cpu_drcs); - return rc; -} - -int dlpar_cpu_readd(int cpu) -{ - struct device_node *dn; - struct device *dev; - u32 drc_index; - int rc; - - dev = get_cpu_device(cpu); - dn = dev->of_node; - - rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index); - - rc = dlpar_cpu_remove_by_index(drc_index); - if (!rc) - rc = dlpar_cpu_add(drc_index); - return rc; } @@ -916,27 +883,8 @@ .notifier_call = pseries_smp_notifier, }; -#define MAX_CEDE_LATENCY_LEVELS 4 -#define CEDE_LATENCY_PARAM_LENGTH 10 -#define CEDE_LATENCY_PARAM_MAX_LENGTH \ - (MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char)) -#define CEDE_LATENCY_TOKEN 45 - -static char cede_parameters[CEDE_LATENCY_PARAM_MAX_LENGTH]; - -static int parse_cede_parameters(void) -{ - memset(cede_parameters, 0, CEDE_LATENCY_PARAM_MAX_LENGTH); - return rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, - NULL, - CEDE_LATENCY_TOKEN, - __pa(cede_parameters), - CEDE_LATENCY_PARAM_MAX_LENGTH); -} - static int __init pseries_cpu_hotplug_init(void) { - int cpu; int qcss_tok; #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE @@ -954,21 +902,13 @@ return 0; } - ppc_md.cpu_die = pseries_mach_cpu_die; + smp_ops->cpu_offline_self = pseries_cpu_offline_self; smp_ops->cpu_disable = pseries_cpu_disable; smp_ops->cpu_die = pseries_cpu_die; /* Processors can be added/removed only on LPAR */ - if (firmware_has_feature(FW_FEATURE_LPAR)) { + if (firmware_has_feature(FW_FEATURE_LPAR)) of_reconfig_notifier_register(&pseries_smp_nb); - cpu_maps_update_begin(); - if (cede_offline_enabled && parse_cede_parameters() == 0) { - default_offline_state = CPU_STATE_INACTIVE; - for_each_online_cpu(cpu) - set_default_offline_state(cpu); - } - cpu_maps_update_done(); - } return 0; } -- Gitblit v1.6.2