From a36159eec6ca17402b0e146b86efaf76568dc353 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 20 Sep 2024 01:41:23 +0000
Subject: [PATCH] Rename AX88772C_eeprom/asix.c to asix_mac.c
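
Rework the s390 CPU-measurement counter facility (CPUMF) perf driver:

- Move the counter-set definitions (enum cpumf_ctr_set), the set
  control masks (cpumf_state_ctl[], renamed to cpumf_ctr_ctl[]), the
  ctr_set_* helpers and the per-CPU state (struct cpu_hw_events,
  renamed to struct cpu_cf_events) out of perf_cpum_cf.c and include
  them from <asm/cpu_mcf.h>.

- Drop the local measurement-alert interrupt handler, the
  reserve_pmc_hardware()/release_pmc_hardware()/setup_pmc_cpu()
  reservation logic and the CPU hotplug callbacks in favour of the
  shared kernel_cpumcf_avail()/__kernel_cpumcf_begin()/
  __kernel_cpumcf_end() interface.

- Extend counter validation for counter-set version number (csvn) 6:
  the crypto set is capped at counter 79 for csvn 1-5 and at 83 for
  csvn >= 6, and the extended set boundary grows from 256 to 288
  counters.

- Register the PMU with a dynamically allocated type (-1) instead of
  PERF_TYPE_RAW. CPU_CYCLES and INSTRUCTIONS events are accepted both
  as PERF_TYPE_HARDWARE and via the dynamic pmu->type, while
  PERF_TYPE_HW_CACHE is no longer handled here.

- Initialize via subsys_initcall() instead of early_initcall(), and
  return early from event initialization when counter reservation
  fails.

As a rough sketch, this is the interface the file now expects from
<asm/cpu_mcf.h>, reconstructed from the identifiers used in the diff
(the exact declarations are assumptions, not the verbatim header):

	/* Counter sets, moved out of perf_cpum_cf.c */
	enum cpumf_ctr_set {
		CPUMF_CTR_SET_BASIC   = 0,    /* Basic Counter Set */
		CPUMF_CTR_SET_USER    = 1,    /* Problem-State Counter Set */
		CPUMF_CTR_SET_CRYPTO  = 2,    /* Crypto-Activity Counter Set */
		CPUMF_CTR_SET_EXT     = 3,    /* Extended Counter Set */
		CPUMF_CTR_SET_MT_DIAG = 4,    /* MT-diagnostic Counter Set */
		CPUMF_CTR_SET_MAX,            /* Maximum number of counter sets */
	};

	/* Set control masks, renamed from cpumf_state_ctl[] */
	extern const u64 cpumf_ctr_ctl[CPUMF_CTR_SET_MAX];

	/* Per-CPU state, renamed from struct cpu_hw_events */
	DECLARE_PER_CPU(struct cpu_cf_events, cpu_cf_events);

	/* Facility reservation, replacing reserve/release_pmc_hardware() */
	int kernel_cpumcf_avail(void);
	int __kernel_cpumcf_begin(void);
	void __kernel_cpumcf_end(void);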

---
 kernel/arch/s390/kernel/perf_cpum_cf.c |  280 +++++++++++++------------------------------------------
 1 file changed, 68 insertions(+), 212 deletions(-)

diff --git a/kernel/arch/s390/kernel/perf_cpum_cf.c b/kernel/arch/s390/kernel/perf_cpum_cf.c
index d5523ad..dddb32e 100644
--- a/kernel/arch/s390/kernel/perf_cpum_cf.c
+++ b/kernel/arch/s390/kernel/perf_cpum_cf.c
@@ -2,81 +2,19 @@
 /*
  * Performance event support for s390x - CPU-measurement Counter Facility
  *
- *  Copyright IBM Corp. 2012, 2017
- *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ *  Copyright IBM Corp. 2012, 2019
+ *  Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
  */
 #define KMSG_COMPONENT	"cpum_cf"
 #define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/kernel_stat.h>
-#include <linux/perf_event.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
 #include <linux/init.h>
 #include <linux/export.h>
-#include <asm/ctl_reg.h>
-#include <asm/irq.h>
-#include <asm/cpu_mf.h>
-
-enum cpumf_ctr_set {
-	CPUMF_CTR_SET_BASIC   = 0,    /* Basic Counter Set */
-	CPUMF_CTR_SET_USER    = 1,    /* Problem-State Counter Set */
-	CPUMF_CTR_SET_CRYPTO  = 2,    /* Crypto-Activity Counter Set */
-	CPUMF_CTR_SET_EXT     = 3,    /* Extended Counter Set */
-	CPUMF_CTR_SET_MT_DIAG = 4,    /* MT-diagnostic Counter Set */
-
-	/* Maximum number of counter sets */
-	CPUMF_CTR_SET_MAX,
-};
-
-#define CPUMF_LCCTL_ENABLE_SHIFT    16
-#define CPUMF_LCCTL_ACTCTL_SHIFT     0
-static const u64 cpumf_state_ctl[CPUMF_CTR_SET_MAX] = {
-	[CPUMF_CTR_SET_BASIC]	= 0x02,
-	[CPUMF_CTR_SET_USER]	= 0x04,
-	[CPUMF_CTR_SET_CRYPTO]	= 0x08,
-	[CPUMF_CTR_SET_EXT]	= 0x01,
-	[CPUMF_CTR_SET_MT_DIAG] = 0x20,
-};
-
-static void ctr_set_enable(u64 *state, int ctr_set)
-{
-	*state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT;
-}
-static void ctr_set_disable(u64 *state, int ctr_set)
-{
-	*state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT);
-}
-static void ctr_set_start(u64 *state, int ctr_set)
-{
-	*state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT;
-}
-static void ctr_set_stop(u64 *state, int ctr_set)
-{
-	*state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
-}
-
-/* Local CPUMF event structure */
-struct cpu_hw_events {
-	struct cpumf_ctr_info	info;
-	atomic_t		ctr_set[CPUMF_CTR_SET_MAX];
-	u64			state, tx_state;
-	unsigned int		flags;
-	unsigned int		txn_flags;
-};
-static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
-	.ctr_set = {
-		[CPUMF_CTR_SET_BASIC]	= ATOMIC_INIT(0),
-		[CPUMF_CTR_SET_USER]	= ATOMIC_INIT(0),
-		[CPUMF_CTR_SET_CRYPTO]	= ATOMIC_INIT(0),
-		[CPUMF_CTR_SET_EXT]	= ATOMIC_INIT(0),
-		[CPUMF_CTR_SET_MT_DIAG] = ATOMIC_INIT(0),
-	},
-	.state = 0,
-	.flags = 0,
-	.txn_flags = 0,
-};
+#include <asm/cpu_mcf.h>
 
 static enum cpumf_ctr_set get_counter_set(u64 event)
 {
@@ -88,7 +26,7 @@
 		set = CPUMF_CTR_SET_USER;
 	else if (event < 128)
 		set = CPUMF_CTR_SET_CRYPTO;
-	else if (event < 256)
+	else if (event < 288)
 		set = CPUMF_CTR_SET_EXT;
 	else if (event >= 448 && event < 496)
 		set = CPUMF_CTR_SET_MT_DIAG;
@@ -98,11 +36,11 @@
 
 static int validate_ctr_version(const struct hw_perf_event *hwc)
 {
-	struct cpu_hw_events *cpuhw;
+	struct cpu_cf_events *cpuhw;
 	int err = 0;
 	u16 mtdiag_ctl;
 
-	cpuhw = &get_cpu_var(cpu_hw_events);
+	cpuhw = &get_cpu_var(cpu_cf_events);
 
 	/* check required version for counter sets */
 	switch (hwc->config_base) {
@@ -112,12 +50,19 @@
 			err = -EOPNOTSUPP;
 		break;
 	case CPUMF_CTR_SET_CRYPTO:
+		if ((cpuhw->info.csvn >= 1 && cpuhw->info.csvn <= 5 &&
+		     hwc->config > 79) ||
+		    (cpuhw->info.csvn >= 6 && hwc->config > 83))
+			err = -EOPNOTSUPP;
+		break;
 	case CPUMF_CTR_SET_EXT:
 		if (cpuhw->info.csvn < 1)
 			err = -EOPNOTSUPP;
 		if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
 		    (cpuhw->info.csvn == 2 && hwc->config > 175) ||
-		    (cpuhw->info.csvn  > 2 && hwc->config > 255))
+		    (cpuhw->info.csvn >= 3 && cpuhw->info.csvn <= 5
+		     && hwc->config > 255) ||
+		    (cpuhw->info.csvn >= 6 && hwc->config > 287))
 			err = -EOPNOTSUPP;
 		break;
 	case CPUMF_CTR_SET_MT_DIAG:
@@ -135,7 +80,7 @@
 		 * Thus, the counters can only be used if SMT is on and the
 		 * counter set is enabled and active.
 		 */
-		mtdiag_ctl = cpumf_state_ctl[CPUMF_CTR_SET_MT_DIAG];
+		mtdiag_ctl = cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG];
 		if (!((cpuhw->info.auth_ctl & mtdiag_ctl) &&
 		      (cpuhw->info.enable_ctl & mtdiag_ctl) &&
 		      (cpuhw->info.act_ctl & mtdiag_ctl)))
@@ -143,28 +88,28 @@
 		break;
 	}
 
-	put_cpu_var(cpu_hw_events);
+	put_cpu_var(cpu_cf_events);
 	return err;
 }
 
 static int validate_ctr_auth(const struct hw_perf_event *hwc)
 {
-	struct cpu_hw_events *cpuhw;
+	struct cpu_cf_events *cpuhw;
 	u64 ctrs_state;
 	int err = 0;
 
-	cpuhw = &get_cpu_var(cpu_hw_events);
+	cpuhw = &get_cpu_var(cpu_cf_events);
 
 	/* Check authorization for cpu counter sets.
 	 * If the particular CPU counter set is not authorized,
 	 * return with -ENOENT in order to fall back to other
 	 * PMUs that might suffice the event request.
 	 */
-	ctrs_state = cpumf_state_ctl[hwc->config_base];
+	ctrs_state = cpumf_ctr_ctl[hwc->config_base];
 	if (!(ctrs_state & cpuhw->info.auth_ctl))
 		err = -ENOENT;
 
-	put_cpu_var(cpu_hw_events);
+	put_cpu_var(cpu_cf_events);
 	return err;
 }
 
@@ -175,7 +120,7 @@
  */
 static void cpumf_pmu_enable(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 	int err;
 
 	if (cpuhw->flags & PMU_F_ENABLED)
@@ -198,7 +143,7 @@
  */
 static void cpumf_pmu_disable(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 	int err;
 	u64 inactive;
 
@@ -222,86 +167,13 @@
 /* Used to avoid races in calling reserve/release_cpumf_hardware */
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
-/* CPU-measurement alerts for the counter facility */
-static void cpumf_measurement_alert(struct ext_code ext_code,
-				    unsigned int alert, unsigned long unused)
-{
-	struct cpu_hw_events *cpuhw;
-
-	if (!(alert & CPU_MF_INT_CF_MASK))
-		return;
-
-	inc_irq_stat(IRQEXT_CMC);
-	cpuhw = this_cpu_ptr(&cpu_hw_events);
-
-	/* Measurement alerts are shared and might happen when the PMU
-	 * is not reserved.  Ignore these alerts in this case. */
-	if (!(cpuhw->flags & PMU_F_RESERVED))
-		return;
-
-	/* counter authorization change alert */
-	if (alert & CPU_MF_INT_CF_CACA)
-		qctri(&cpuhw->info);
-
-	/* loss of counter data alert */
-	if (alert & CPU_MF_INT_CF_LCDA)
-		pr_err("CPU[%i] Counter data was lost\n", smp_processor_id());
-
-	/* loss of MT counter data alert */
-	if (alert & CPU_MF_INT_CF_MTDA)
-		pr_warn("CPU[%i] MT counter data was lost\n",
-			smp_processor_id());
-}
-
-#define PMC_INIT      0
-#define PMC_RELEASE   1
-static void setup_pmc_cpu(void *flags)
-{
-	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
-
-	switch (*((int *) flags)) {
-	case PMC_INIT:
-		memset(&cpuhw->info, 0, sizeof(cpuhw->info));
-		qctri(&cpuhw->info);
-		cpuhw->flags |= PMU_F_RESERVED;
-		break;
-
-	case PMC_RELEASE:
-		cpuhw->flags &= ~PMU_F_RESERVED;
-		break;
-	}
-
-	/* Disable CPU counter sets */
-	lcctl(0);
-}
-
-/* Initialize the CPU-measurement facility */
-static int reserve_pmc_hardware(void)
-{
-	int flags = PMC_INIT;
-
-	on_each_cpu(setup_pmc_cpu, &flags, 1);
-	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
-
-	return 0;
-}
-
-/* Release the CPU-measurement facility */
-static void release_pmc_hardware(void)
-{
-	int flags = PMC_RELEASE;
-
-	on_each_cpu(setup_pmc_cpu, &flags, 1);
-	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
-}
-
 /* Release the PMU if event is the last perf event */
 static void hw_perf_event_destroy(struct perf_event *event)
 {
 	if (!atomic_add_unless(&num_events, -1, 1)) {
 		mutex_lock(&pmc_reserve_mutex);
 		if (atomic_dec_return(&num_events) == 0)
-			release_pmc_hardware();
+			__kernel_cpumcf_end();
 		mutex_unlock(&pmc_reserve_mutex);
 	}
 }
@@ -327,15 +199,15 @@
 	[PERF_COUNT_HW_BUS_CYCLES]	    = -1,
 };
 
-static int __hw_perf_event_init(struct perf_event *event)
+static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
 {
 	struct perf_event_attr *attr = &event->attr;
 	struct hw_perf_event *hwc = &event->hw;
 	enum cpumf_ctr_set set;
-	int err;
+	int err = 0;
 	u64 ev;
 
-	switch (attr->type) {
+	switch (type) {
 	case PERF_TYPE_RAW:
 		/* Raw events are used to access counters directly,
 		 * hence do not permit excludes */
@@ -402,12 +274,14 @@
 	/* Initialize for using the CPU-measurement counter facility */
 	if (!atomic_inc_not_zero(&num_events)) {
 		mutex_lock(&pmc_reserve_mutex);
-		if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
+		if (atomic_read(&num_events) == 0 && __kernel_cpumcf_begin())
 			err = -EBUSY;
 		else
 			atomic_inc(&num_events);
 		mutex_unlock(&pmc_reserve_mutex);
 	}
+	if (err)
+		return err;
 	event->destroy = hw_perf_event_destroy;
 
 	/* Finally, validate version and authorization of the counter set */
@@ -418,19 +292,38 @@
 	return err;
 }
 
+/* Events CPU_CYCLES and INSTRUCTIONS can be submitted with two different
+ * attribute::type values:
+ * - PERF_TYPE_HARDWARE: the generic hardware event type
+ * - pmu->type: the dynamic type assigned when this PMU is registered
+ * Handle both types of invocation identically; they address the same
+ * hardware. The result differs only when the event modifiers
+ * exclude_kernel and/or exclude_user are also set.
+ */
+static int cpumf_pmu_event_type(struct perf_event *event)
+{
+	u64 ev = event->attr.config;
+
+	if (cpumf_generic_events_basic[PERF_COUNT_HW_CPU_CYCLES] == ev ||
+	    cpumf_generic_events_basic[PERF_COUNT_HW_INSTRUCTIONS] == ev ||
+	    cpumf_generic_events_user[PERF_COUNT_HW_CPU_CYCLES] == ev ||
+	    cpumf_generic_events_user[PERF_COUNT_HW_INSTRUCTIONS] == ev)
+		return PERF_TYPE_HARDWARE;
+	return PERF_TYPE_RAW;
+}
+
 static int cpumf_pmu_event_init(struct perf_event *event)
 {
+	unsigned int type = event->attr.type;
 	int err;
 
-	switch (event->attr.type) {
-	case PERF_TYPE_HARDWARE:
-	case PERF_TYPE_HW_CACHE:
-	case PERF_TYPE_RAW:
-		err = __hw_perf_event_init(event);
-		break;
-	default:
+	if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
+		err = __hw_perf_event_init(event, type);
+	else if (event->pmu->type == type)
+		/* Registered as unknown PMU */
+		err = __hw_perf_event_init(event, cpumf_pmu_event_type(event));
+	else
 		return -ENOENT;
-	}
 
 	if (unlikely(err) && event->destroy)
 		event->destroy(event);
@@ -488,7 +381,7 @@
 
 static void cpumf_pmu_start(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 	struct hw_perf_event *hwc = &event->hw;
 
 	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
@@ -519,7 +412,7 @@
 
 static void cpumf_pmu_stop(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 	struct hw_perf_event *hwc = &event->hw;
 
 	if (!(hwc->state & PERF_HES_STOPPED)) {
@@ -540,7 +433,7 @@
 
 static int cpumf_pmu_add(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 
 	/* Check authorization for the counter set to which this
 	 * counter belongs.
@@ -564,7 +457,7 @@
 
 static void cpumf_pmu_del(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 
 	cpumf_pmu_stop(event, PERF_EF_UPDATE);
 
@@ -592,7 +485,7 @@
  */
 static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
 {
-	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 
 	WARN_ON_ONCE(cpuhw->txn_flags);		/* txn already in flight */
 
@@ -612,7 +505,7 @@
 static void cpumf_pmu_cancel_txn(struct pmu *pmu)
 {
 	unsigned int txn_flags;
-	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 
 	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */
 
@@ -633,7 +526,7 @@
  */
 static int cpumf_pmu_commit_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 	u64 state;
 
 	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */
@@ -671,54 +564,17 @@
 	.cancel_txn   = cpumf_pmu_cancel_txn,
 };
 
-static int cpumf_pmf_setup(unsigned int cpu, int flags)
-{
-	local_irq_disable();
-	setup_pmc_cpu(&flags);
-	local_irq_enable();
-	return 0;
-}
-
-static int s390_pmu_online_cpu(unsigned int cpu)
-{
-	return cpumf_pmf_setup(cpu, PMC_INIT);
-}
-
-static int s390_pmu_offline_cpu(unsigned int cpu)
-{
-	return cpumf_pmf_setup(cpu, PMC_RELEASE);
-}
-
 static int __init cpumf_pmu_init(void)
 {
 	int rc;
 
-	if (!cpum_cf_avail())
+	if (!kernel_cpumcf_avail())
 		return -ENODEV;
 
-	/* clear bit 15 of cr0 to unauthorize problem-state to
-	 * extract measurement counters */
-	ctl_clear_bit(0, 48);
-
-	/* register handler for measurement-alert interruptions */
-	rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
-				   cpumf_measurement_alert);
-	if (rc) {
-		pr_err("Registering for CPU-measurement alerts "
-		       "failed with rc=%i\n", rc);
-		return rc;
-	}
-
 	cpumf_pmu.attr_groups = cpumf_cf_event_group();
-	rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
-	if (rc) {
+	rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", -1);
+	if (rc)
 		pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
-		unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
-					cpumf_measurement_alert);
-		return rc;
-	}
-	return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
-				 "perf/s390/cf:online",
-				 s390_pmu_online_cpu, s390_pmu_offline_cpu);
+	return rc;
 }
-early_initcall(cpumf_pmu_init);
+subsys_initcall(cpumf_pmu_init);

--
Gitblit v1.6.2