From b22da3d8526a935aa31e086e63f60ff3246cb61c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 07:24:11 +0000
Subject: [PATCH] add stmac read mac from eeprom
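
The intel_pmc_core changes below add Cannon Lake, Ice Lake and Tiger Lake
register maps, expose LTR, low-power substate and package C-state
residencies through debugfs, report S0ix entry failures on resume, and
convert the driver to an ACPI-matched platform driver.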
---
kernel/drivers/platform/x86/intel_pmc_core.c | 1006 +++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 843 insertions(+), 163 deletions(-)
diff --git a/kernel/drivers/platform/x86/intel_pmc_core.c b/kernel/drivers/platform/x86/intel_pmc_core.c
index 36bd254..ca32a4c 100644
--- a/kernel/drivers/platform/x86/intel_pmc_core.c
+++ b/kernel/drivers/platform/x86/intel_pmc_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Core SoC Power Management Controller Driver
*
@@ -6,44 +7,50 @@
*
* Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
* Vishwanath Somayaji <vishwanath.somayaji@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
#include <linux/uaccess.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/msr.h>
+#include <asm/tsc.h>
#include "intel_pmc_core.h"
-#define ICPU(model, data) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (kernel_ulong_t)data }
-
static struct pmc_dev pmc;
+
+/* PKGC MSRs are common across Intel Core SoCs */
+static const struct pmc_bit_map msr_map[] = {
+ {"Package C2", MSR_PKG_C2_RESIDENCY},
+ {"Package C3", MSR_PKG_C3_RESIDENCY},
+ {"Package C6", MSR_PKG_C6_RESIDENCY},
+ {"Package C7", MSR_PKG_C7_RESIDENCY},
+ {"Package C8", MSR_PKG_C8_RESIDENCY},
+ {"Package C9", MSR_PKG_C9_RESIDENCY},
+ {"Package C10", MSR_PKG_C10_RESIDENCY},
+ {}
+};
static const struct pmc_bit_map spt_pll_map[] = {
{"MIPI PLL", SPT_PMC_BIT_MPHY_CMN_LANE0},
{"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1},
{"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2},
{"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3},
- {},
+ {}
};
static const struct pmc_bit_map spt_mphy_map[] = {
@@ -63,7 +70,7 @@
{"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13},
{"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14},
{"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15},
- {},
+ {}
};
static const struct pmc_bit_map spt_pfear_map[] = {
@@ -107,23 +114,62 @@
{"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1},
{"CSME_RTC", SPT_PMC_BIT_CSME_RTC},
{"CSME_PSF", SPT_PMC_BIT_CSME_PSF},
- {},
+ {}
+};
+
+static const struct pmc_bit_map *ext_spt_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of spt_reg_map for
+ * a list of core SoCs using this.
+ */
+ spt_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map spt_ltr_show_map[] = {
+ {"SOUTHPORT_A", SPT_PMC_LTR_SPA},
+ {"SOUTHPORT_B", SPT_PMC_LTR_SPB},
+ {"SATA", SPT_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", SPT_PMC_LTR_GBE},
+ {"XHCI", SPT_PMC_LTR_XHCI},
+ {"Reserved", SPT_PMC_LTR_RESERVED},
+ {"ME", SPT_PMC_LTR_ME},
+ /* EVA is Enterprise Value Add, doesn't really exist on PCH */
+ {"EVA", SPT_PMC_LTR_EVA},
+ {"SOUTHPORT_C", SPT_PMC_LTR_SPC},
+ {"HD_AUDIO", SPT_PMC_LTR_AZ},
+ {"LPSS", SPT_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", SPT_PMC_LTR_SPD},
+ {"SOUTHPORT_E", SPT_PMC_LTR_SPE},
+ {"CAMERA", SPT_PMC_LTR_CAM},
+ {"ESPI", SPT_PMC_LTR_ESPI},
+ {"SCC", SPT_PMC_LTR_SCC},
+ {"ISH", SPT_PMC_LTR_ISH},
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", SPT_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", SPT_PMC_LTR_CUR_ASLT},
+ {}
};
static const struct pmc_reg_map spt_reg_map = {
- .pfear_sts = spt_pfear_map,
+ .pfear_sts = ext_spt_pfear_map,
.mphy_sts = spt_mphy_map,
.pll_sts = spt_pll_map,
+ .ltr_show_sts = spt_ltr_show_map,
+ .msr_sts = msr_map,
.slp_s0_offset = SPT_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = SPT_PMC_SLP_S0_RES_COUNTER_STEP,
.ltr_ignore_offset = SPT_PMC_LTR_IGNORE_OFFSET,
.regmap_length = SPT_PMC_MMIO_REG_LEN,
.ppfear0_offset = SPT_PMC_XRAM_PPFEAR0A,
.ppfear_buckets = SPT_PPFEAR_NUM_ENTRIES,
.pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED,
+ .pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
};
-/* Cannonlake: PGD PFET Enable Ack Status Register(s) bitmap */
+/* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */
static const struct pmc_bit_map cnp_pfear_map[] = {
{"PMC", BIT(0)},
{"OPI-DMI", BIT(1)},
@@ -150,25 +196,25 @@
{"SDX", BIT(4)},
{"SPE", BIT(5)},
{"Fuse", BIT(6)},
- {"Res_23", BIT(7)},
+ {"SBR8", BIT(7)},
{"CSME_FSC", BIT(0)},
{"USB3_OTG", BIT(1)},
{"EXI", BIT(2)},
{"CSE", BIT(3)},
- {"csme_kvm", BIT(4)},
- {"csme_pmt", BIT(5)},
- {"csme_clink", BIT(6)},
- {"csme_ptio", BIT(7)},
+ {"CSME_KVM", BIT(4)},
+ {"CSME_PMT", BIT(5)},
+ {"CSME_CLINK", BIT(6)},
+ {"CSME_PTIO", BIT(7)},
- {"csme_usbr", BIT(0)},
- {"csme_susram", BIT(1)},
- {"csme_smt1", BIT(2)},
+ {"CSME_USBR", BIT(0)},
+ {"CSME_SUSRAM", BIT(1)},
+ {"CSME_SMT1", BIT(2)},
{"CSME_SMT4", BIT(3)},
- {"csme_sms2", BIT(4)},
- {"csme_sms1", BIT(5)},
- {"csme_rtc", BIT(6)},
- {"csme_psf", BIT(7)},
+ {"CSME_SMS2", BIT(4)},
+ {"CSME_SMS1", BIT(5)},
+ {"CSME_RTC", BIT(6)},
+ {"CSME_PSF", BIT(7)},
{"SBR0", BIT(0)},
{"SBR1", BIT(1)},
@@ -193,7 +239,62 @@
{"HDA_PGD4", BIT(2)},
{"HDA_PGD5", BIT(3)},
{"HDA_PGD6", BIT(4)},
+ {"PSF6", BIT(5)},
+ {"PSF7", BIT(6)},
+ {"PSF8", BIT(7)},
{}
+};
+
+static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
+ cnp_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map icl_pfear_map[] = {
+ {"RES_65", BIT(0)},
+ {"RES_66", BIT(1)},
+ {"RES_67", BIT(2)},
+ {"TAM", BIT(3)},
+ {"GBETSN", BIT(4)},
+ {"TBTLSX", BIT(5)},
+ {"RES_71", BIT(6)},
+ {"RES_72", BIT(7)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_icl_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of icl_reg_map for
+ * a list of core SoCs using this.
+ */
+ cnp_pfear_map,
+ icl_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map tgl_pfear_map[] = {
+ {"PSF9", BIT(0)},
+ {"RES_66", BIT(1)},
+ {"RES_67", BIT(2)},
+ {"RES_68", BIT(3)},
+ {"RES_69", BIT(4)},
+ {"RES_70", BIT(5)},
+ {"TBTLSX", BIT(6)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_tgl_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of tgl_reg_map for
+ * a list of core SoCs using this.
+ */
+ cnp_pfear_map,
+ tgl_pfear_map,
+ NULL
};
static const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
@@ -249,13 +350,49 @@
cnp_slps0_dbg0_map,
cnp_slps0_dbg1_map,
cnp_slps0_dbg2_map,
- NULL,
+ NULL
+};
+
+static const struct pmc_bit_map cnp_ltr_show_map[] = {
+ {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
+ {"SOUTHPORT_B", CNP_PMC_LTR_SPB},
+ {"SATA", CNP_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
+ {"XHCI", CNP_PMC_LTR_XHCI},
+ {"Reserved", CNP_PMC_LTR_RESERVED},
+ {"ME", CNP_PMC_LTR_ME},
+ /* EVA is Enterprise Value Add, doesn't really exist on PCH */
+ {"EVA", CNP_PMC_LTR_EVA},
+ {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
+ {"HD_AUDIO", CNP_PMC_LTR_AZ},
+ {"CNV", CNP_PMC_LTR_CNV},
+ {"LPSS", CNP_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
+ {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
+ {"CAMERA", CNP_PMC_LTR_CAM},
+ {"ESPI", CNP_PMC_LTR_ESPI},
+ {"SCC", CNP_PMC_LTR_SCC},
+ {"ISH", CNP_PMC_LTR_ISH},
+ {"UFSX2", CNP_PMC_LTR_UFSX2},
+ {"EMMC", CNP_PMC_LTR_EMMC},
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
+ {"WIGIG", ICL_PMC_LTR_WIGIG},
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
+ {}
};
static const struct pmc_reg_map cnp_reg_map = {
- .pfear_sts = cnp_pfear_map,
+ .pfear_sts = ext_cnp_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = SPT_PMC_SLP_S0_RES_COUNTER_STEP,
.slps0_dbg_maps = cnp_slps0_dbg_maps,
+ .ltr_show_sts = cnp_ltr_show_map,
+ .msr_sts = msr_map,
.slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
.regmap_length = CNP_PMC_MMIO_REG_LEN,
@@ -263,27 +400,207 @@
.ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED,
};
-static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
-{
- return readb(pmcdev->regbase + offset);
-}
+static const struct pmc_reg_map icl_reg_map = {
+ .pfear_sts = ext_icl_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = ICL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .slps0_dbg_maps = cnp_slps0_dbg_maps,
+ .ltr_show_sts = cnp_ltr_show_map,
+ .msr_sts = msr_map,
+ .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
+};
+
+static const struct pmc_bit_map tgl_clocksource_status_map[] = {
+ {"USB2PLL_OFF_STS", BIT(18)},
+ {"PCIe/USB3.1_Gen2PLL_OFF_STS", BIT(19)},
+ {"PCIe_Gen3PLL_OFF_STS", BIT(20)},
+ {"OPIOPLL_OFF_STS", BIT(21)},
+ {"OCPLL_OFF_STS", BIT(22)},
+ {"MainPLL_OFF_STS", BIT(23)},
+ {"MIPIPLL_OFF_STS", BIT(24)},
+ {"Fast_XTAL_Osc_OFF_STS", BIT(25)},
+ {"AC_Ring_Osc_OFF_STS", BIT(26)},
+ {"MC_Ring_Osc_OFF_STS", BIT(27)},
+ {"SATAPLL_OFF_STS", BIT(29)},
+ {"XTAL_USB2PLL_OFF_STS", BIT(31)},
+ {}
+};
+
+static const struct pmc_bit_map tgl_power_gating_status_map[] = {
+ {"CSME_PG_STS", BIT(0)},
+ {"SATA_PG_STS", BIT(1)},
+ {"xHCI_PG_STS", BIT(2)},
+ {"UFSX2_PG_STS", BIT(3)},
+ {"OTG_PG_STS", BIT(5)},
+ {"SPA_PG_STS", BIT(6)},
+ {"SPB_PG_STS", BIT(7)},
+ {"SPC_PG_STS", BIT(8)},
+ {"SPD_PG_STS", BIT(9)},
+ {"SPE_PG_STS", BIT(10)},
+ {"SPF_PG_STS", BIT(11)},
+ {"LSX_PG_STS", BIT(13)},
+ {"P2SB_PG_STS", BIT(14)},
+ {"PSF_PG_STS", BIT(15)},
+ {"SBR_PG_STS", BIT(16)},
+ {"OPIDMI_PG_STS", BIT(17)},
+ {"THC0_PG_STS", BIT(18)},
+ {"THC1_PG_STS", BIT(19)},
+ {"GBETSN_PG_STS", BIT(20)},
+ {"GBE_PG_STS", BIT(21)},
+ {"LPSS_PG_STS", BIT(22)},
+ {"MMP_UFSX2_PG_STS", BIT(23)},
+ {"MMP_UFSX2B_PG_STS", BIT(24)},
+ {"FIA_PG_STS", BIT(25)},
+ {}
+};
+
+static const struct pmc_bit_map tgl_d3_status_map[] = {
+ {"ADSP_D3_STS", BIT(0)},
+ {"SATA_D3_STS", BIT(1)},
+ {"xHCI0_D3_STS", BIT(2)},
+ {"xDCI1_D3_STS", BIT(5)},
+ {"SDX_D3_STS", BIT(6)},
+ {"EMMC_D3_STS", BIT(7)},
+ {"IS_D3_STS", BIT(8)},
+ {"THC0_D3_STS", BIT(9)},
+ {"THC1_D3_STS", BIT(10)},
+ {"GBE_D3_STS", BIT(11)},
+ {"GBE_TSN_D3_STS", BIT(12)},
+ {}
+};
+
+static const struct pmc_bit_map tgl_vnn_req_status_map[] = {
+ {"GPIO_COM0_VNN_REQ_STS", BIT(1)},
+ {"GPIO_COM1_VNN_REQ_STS", BIT(2)},
+ {"GPIO_COM2_VNN_REQ_STS", BIT(3)},
+ {"GPIO_COM3_VNN_REQ_STS", BIT(4)},
+ {"GPIO_COM4_VNN_REQ_STS", BIT(5)},
+ {"GPIO_COM5_VNN_REQ_STS", BIT(6)},
+ {"Audio_VNN_REQ_STS", BIT(7)},
+ {"ISH_VNN_REQ_STS", BIT(8)},
+ {"CNVI_VNN_REQ_STS", BIT(9)},
+ {"eSPI_VNN_REQ_STS", BIT(10)},
+ {"Display_VNN_REQ_STS", BIT(11)},
+ {"DTS_VNN_REQ_STS", BIT(12)},
+ {"SMBUS_VNN_REQ_STS", BIT(14)},
+ {"CSME_VNN_REQ_STS", BIT(15)},
+ {"SMLINK0_VNN_REQ_STS", BIT(16)},
+ {"SMLINK1_VNN_REQ_STS", BIT(17)},
+ {"CLINK_VNN_REQ_STS", BIT(20)},
+ {"DCI_VNN_REQ_STS", BIT(21)},
+ {"ITH_VNN_REQ_STS", BIT(22)},
+ {"CSME_VNN_REQ_STS", BIT(24)},
+ {"GBE_VNN_REQ_STS", BIT(25)},
+ {}
+};
+
+static const struct pmc_bit_map tgl_vnn_misc_status_map[] = {
+ {"CPU_C10_REQ_STS_0", BIT(0)},
+ {"PCIe_LPM_En_REQ_STS_3", BIT(3)},
+ {"ITH_REQ_STS_5", BIT(5)},
+ {"CNVI_REQ_STS_6", BIT(6)},
+ {"ISH_REQ_STS_7", BIT(7)},
+ {"USB2_SUS_PG_Sys_REQ_STS_10", BIT(10)},
+ {"PCIe_Clk_REQ_STS_12", BIT(12)},
+ {"MPHY_Core_DL_REQ_STS_16", BIT(16)},
+ {"Break-even_En_REQ_STS_17", BIT(17)},
+ {"Auto-demo_En_REQ_STS_18", BIT(18)},
+ {"MPHY_SUS_REQ_STS_22", BIT(22)},
+ {"xDCI_attached_REQ_STS_24", BIT(24)},
+ {}
+};
+
+static const struct pmc_bit_map tgl_signal_status_map[] = {
+ {"LSX_Wake0_En_STS", BIT(0)},
+ {"LSX_Wake0_Pol_STS", BIT(1)},
+ {"LSX_Wake1_En_STS", BIT(2)},
+ {"LSX_Wake1_Pol_STS", BIT(3)},
+ {"LSX_Wake2_En_STS", BIT(4)},
+ {"LSX_Wake2_Pol_STS", BIT(5)},
+ {"LSX_Wake3_En_STS", BIT(6)},
+ {"LSX_Wake3_Pol_STS", BIT(7)},
+ {"LSX_Wake4_En_STS", BIT(8)},
+ {"LSX_Wake4_Pol_STS", BIT(9)},
+ {"LSX_Wake5_En_STS", BIT(10)},
+ {"LSX_Wake5_Pol_STS", BIT(11)},
+ {"LSX_Wake6_En_STS", BIT(12)},
+ {"LSX_Wake6_Pol_STS", BIT(13)},
+ {"LSX_Wake7_En_STS", BIT(14)},
+ {"LSX_Wake7_Pol_STS", BIT(15)},
+ {"Intel_Se_IO_Wake0_En_STS", BIT(16)},
+ {"Intel_Se_IO_Wake0_Pol_STS", BIT(17)},
+ {"Intel_Se_IO_Wake1_En_STS", BIT(18)},
+ {"Intel_Se_IO_Wake1_Pol_STS", BIT(19)},
+ {"Int_Timer_SS_Wake0_En_STS", BIT(20)},
+ {"Int_Timer_SS_Wake0_Pol_STS", BIT(21)},
+ {"Int_Timer_SS_Wake1_En_STS", BIT(22)},
+ {"Int_Timer_SS_Wake1_Pol_STS", BIT(23)},
+ {"Int_Timer_SS_Wake2_En_STS", BIT(24)},
+ {"Int_Timer_SS_Wake2_Pol_STS", BIT(25)},
+ {"Int_Timer_SS_Wake3_En_STS", BIT(26)},
+ {"Int_Timer_SS_Wake3_Pol_STS", BIT(27)},
+ {"Int_Timer_SS_Wake4_En_STS", BIT(28)},
+ {"Int_Timer_SS_Wake4_Pol_STS", BIT(29)},
+ {"Int_Timer_SS_Wake5_En_STS", BIT(30)},
+ {"Int_Timer_SS_Wake5_Pol_STS", BIT(31)},
+ {}
+};
+
+static const struct pmc_bit_map *tgl_lpm_maps[] = {
+ tgl_clocksource_status_map,
+ tgl_power_gating_status_map,
+ tgl_d3_status_map,
+ tgl_vnn_req_status_map,
+ tgl_vnn_misc_status_map,
+ tgl_signal_status_map,
+ NULL
+};
+
+static const struct pmc_reg_map tgl_reg_map = {
+ .pfear_sts = ext_tgl_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .ltr_show_sts = cnp_ltr_show_map,
+ .msr_sts = msr_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
+ .lpm_modes = tgl_lpm_modes,
+ .lpm_en_offset = TGL_LPM_EN_OFFSET,
+ .lpm_residency_offset = TGL_LPM_RESIDENCY_OFFSET,
+ .lpm_sts = tgl_lpm_maps,
+ .lpm_status_offset = TGL_LPM_STATUS_OFFSET,
+ .lpm_live_status_offset = TGL_LPM_LIVE_STATUS_OFFSET,
+};
static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
{
return readl(pmcdev->regbase + reg_offset);
}
-static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int
- reg_offset, u32 val)
+static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
+ u32 val)
{
writel(val, pmcdev->regbase + reg_offset);
}
-static inline u32 pmc_core_adjust_slp_s0_step(u32 value)
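+/*
+ * The raw SLP_S0 residency counter ticks in platform-specific units;
+ * multiplying by slp_s0_res_counter_step gives the value exported through
+ * the slp_s0_residency_usec debugfs file.
+ */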
+static inline u64 pmc_core_adjust_slp_s0_step(struct pmc_dev *pmcdev, u32 value)
{
- return value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
+ return (u64)value * pmcdev->map->slp_s0_res_counter_step;
}
static int pmc_core_dev_state_get(void *data, u64 *val)
@@ -293,7 +610,7 @@
u32 value;
value = pmc_core_reg_read(pmcdev, map->slp_s0_offset);
- *val = pmc_core_adjust_slp_s0_step(value);
+ *val = pmc_core_adjust_slp_s0_step(pmcdev, value);
return 0;
}
@@ -309,23 +626,105 @@
return value & BIT(pmcdev->map->pm_read_disable_bit);
}
-#if IS_ENABLED(CONFIG_DEBUG_FS)
-static bool slps0_dbg_latch;
-
-static void pmc_core_display_map(struct seq_file *s, int index,
- u8 pf_reg, const struct pmc_bit_map *pf_map)
+static void pmc_core_slps0_display(struct pmc_dev *pmcdev, struct device *dev,
+ struct seq_file *s)
{
- seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
- index, pf_map[index].name,
- pf_map[index].bit_mask & pf_reg ? "Off" : "On");
+ const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
+ const struct pmc_bit_map *map;
+ int offset = pmcdev->map->slps0_dbg_offset;
+ u32 data;
+
+ while (*maps) {
+ map = *maps;
+ data = pmc_core_reg_read(pmcdev, offset);
+ offset += 4;
+ while (map->name) {
+ if (dev)
+ dev_info(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
+ map->name,
+ data & map->bit_mask ? "Yes" : "No");
+ if (s)
+ seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
+ map->name,
+ data & map->bit_mask ? "Yes" : "No");
+ ++map;
+ }
+ ++maps;
+ }
}
-static int pmc_core_ppfear_sts_show(struct seq_file *s, void *unused)
+static int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
+{
+ int idx;
+
+ for (idx = 0; maps[idx]; idx++)
+ ;/* Nothing */
+
+ return idx;
+}
+
+static void pmc_core_lpm_display(struct pmc_dev *pmcdev, struct device *dev,
+ struct seq_file *s, u32 offset,
+ const char *str,
+ const struct pmc_bit_map **maps)
+{
+ int index, idx, len = 32, bit_mask, arr_size;
+ u32 *lpm_regs;
+
+ arr_size = pmc_core_lpm_get_arr_size(maps);
+ lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL);
+ if (!lpm_regs)
+ return;
+
+ for (index = 0; index < arr_size; index++) {
+ lpm_regs[index] = pmc_core_reg_read(pmcdev, offset);
+ offset += 4;
+ }
+
+ for (idx = 0; idx < arr_size; idx++) {
+ if (dev)
+ dev_info(dev, "\nLPM_%s_%d:\t0x%x\n", str, idx,
+ lpm_regs[idx]);
+ if (s)
+ seq_printf(s, "\nLPM_%s_%d:\t0x%x\n", str, idx,
+ lpm_regs[idx]);
+ for (index = 0; maps[idx][index].name && index < len; index++) {
+ bit_mask = maps[idx][index].bit_mask;
+ if (dev)
+ dev_info(dev, "%-30s %-30d\n",
+ maps[idx][index].name,
+ lpm_regs[idx] & bit_mask ? 1 : 0);
+ if (s)
+ seq_printf(s, "%-30s %-30d\n",
+ maps[idx][index].name,
+ lpm_regs[idx] & bit_mask ? 1 : 0);
+ }
+ }
+
+ kfree(lpm_regs);
+}
+
+static bool slps0_dbg_latch;
+
+static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
+{
+ return readb(pmcdev->regbase + offset);
+}
+
+static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
+ u8 pf_reg, const struct pmc_bit_map **pf_map)
+{
+ seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
+ ip, pf_map[idx][index].name,
+ pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
+}
+
+static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- const struct pmc_bit_map *map = pmcdev->map->pfear_sts;
+ const struct pmc_bit_map **maps = pmcdev->map->pfear_sts;
u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
- int index, iter;
+ int index, iter, idx, ip = 0;
iter = pmcdev->map->ppfear0_offset;
@@ -333,24 +732,16 @@
index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
- for (index = 0; map[index].name &&
- index < pmcdev->map->ppfear_buckets * 8; index++)
- pmc_core_display_map(s, index, pf_regs[index / 8], map);
+ for (idx = 0; maps[idx]; idx++) {
+ for (index = 0; maps[idx][index].name &&
+ index < pmcdev->map->ppfear_buckets * 8; ip++, index++)
+ pmc_core_display_map(s, index, idx, ip,
+ pf_regs[index / 8], maps);
+ }
return 0;
}
-
-static int pmc_core_ppfear_sts_open(struct inode *inode, struct file *file)
-{
- return single_open(file, pmc_core_ppfear_sts_show, inode->i_private);
-}
-
-static const struct file_operations pmc_core_ppfear_ops = {
- .open = pmc_core_ppfear_sts_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);
/* This function should return link status, 0 means ready */
static int pmc_core_mtpmc_link_status(void)
@@ -382,7 +773,7 @@
return 0;
}
-static int pmc_core_mphy_pg_sts_show(struct seq_file *s, void *unused)
+static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
const struct pmc_bit_map *map = pmcdev->map->mphy_sts;
@@ -416,7 +807,7 @@
msleep(10);
val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
- for (index = 0; map[index].name && index < 8; index++) {
+ for (index = 0; index < 8 && map[index].name; index++) {
seq_printf(s, "%-32s\tState: %s\n",
map[index].name,
map[index].bit_mask & val_low ? "Not power gated" :
@@ -434,18 +825,7 @@
mutex_unlock(&pmcdev->lock);
return err;
}
-
-static int pmc_core_mphy_pg_sts_open(struct inode *inode, struct file *file)
-{
- return single_open(file, pmc_core_mphy_pg_sts_show, inode->i_private);
-}
-
-static const struct file_operations pmc_core_mphy_pg_ops = {
- .open = pmc_core_mphy_pg_sts_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);
static int pmc_core_pll_show(struct seq_file *s, void *unused)
{
@@ -481,46 +861,47 @@
mutex_unlock(&pmcdev->lock);
return err;
}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
-static int pmc_core_pll_open(struct inode *inode, struct file *file)
-{
- return single_open(file, pmc_core_pll_show, inode->i_private);
-}
-
-static const struct file_operations pmc_core_pll_ops = {
- .open = pmc_core_pll_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user
-*userbuf, size_t count, loff_t *ppos)
+static int pmc_core_send_ltr_ignore(u32 value)
{
struct pmc_dev *pmcdev = &pmc;
const struct pmc_reg_map *map = pmcdev->map;
- u32 val, buf_size, fd;
+ u32 reg;
int err = 0;
- buf_size = count < 64 ? count : 64;
mutex_lock(&pmcdev->lock);
- if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) {
- err = -EFAULT;
- goto out_unlock;
- }
-
- if (val > NUM_IP_IGN_ALLOWED) {
+ if (value > map->ltr_ignore_max) {
err = -EINVAL;
goto out_unlock;
}
- fd = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
- fd |= (1U << val);
- pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, fd);
+ reg = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
+ reg |= BIT(value);
+ pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, reg);
out_unlock:
mutex_unlock(&pmcdev->lock);
+
+ return err;
+}
+
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ u32 buf_size, value;
+ int err;
+
+ buf_size = min_t(u32, count, 64);
+
+ err = kstrtou32_from_user(userbuf, buf_size, 10, &value);
+ if (err)
+ return err;
+
+ err = pmc_core_send_ltr_ignore(value);
+
return err == 0 ? count : err;
}
@@ -568,63 +949,187 @@
static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
- const struct pmc_bit_map *map;
- int offset;
- u32 data;
pmc_core_slps0_dbg_latch(pmcdev, false);
- offset = pmcdev->map->slps0_dbg_offset;
- while (*maps) {
- map = *maps;
- data = pmc_core_reg_read(pmcdev, offset);
- offset += 4;
- while (map->name) {
- seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
- map->name,
- data & map->bit_mask ?
- "Yes" : "No");
- ++map;
- }
- ++maps;
- }
+ pmc_core_slps0_display(pmcdev, NULL, s);
pmc_core_slps0_dbg_latch(pmcdev, true);
+
return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);
+
+static u32 convert_ltr_scale(u32 val)
+{
+ /*
+ * As per PCIE specification supporting document
+ * ECN_LatencyTolnReporting_14Aug08.pdf the Latency
+ * Tolerance Reporting data payload is encoded in a
+ * 3 bit scale and 10 bit value fields. Values are
+ * multiplied by the indicated scale to yield an absolute time
+ * value, expressible in a range from 1 nanosecond to
+ * 2^25*(2^10-1) = 34,326,183,936 nanoseconds.
+ *
+ * scale encoding is as follows:
+ *
+ * ----------------------------------------------
+ * |scale factor | Multiplier (ns) |
+ * ----------------------------------------------
+ * | 0 | 1 |
+ * | 1 | 32 |
+ * | 2 | 1024 |
+ * | 3 | 32768 |
+ * | 4 | 1048576 |
+ * | 5 | 33554432 |
+ * | 6 | Invalid |
+ * | 7 | Invalid |
+ * ----------------------------------------------
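+	 *
+	 * As a worked example (illustrative numbers only), a 10-bit value
+	 * of 100 with scale factor 2 decodes to 100 * 1024 = 102400 ns.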
+ */
+ if (val > 5) {
+ pr_warn("Invalid LTR scale factor.\n");
+ return 0;
+ }
+
+ return 1U << (5 * val);
+}
+
+static int pmc_core_ltr_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map *map = pmcdev->map->ltr_show_sts;
+ u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
+ u32 ltr_raw_data, scale, val;
+ u16 snoop_ltr, nonsnoop_ltr;
+ int index;
+
+ for (index = 0; map[index].name ; index++) {
+ decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
+ ltr_raw_data = pmc_core_reg_read(pmcdev,
+ map[index].bit_mask);
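+		/* Low 16 bits hold the snoop LTR, high 16 bits the non-snoop LTR */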
+ snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
+ nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;
+
+ if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
+ scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
+ val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
+ decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
+ }
+
+ if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
+ scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
+ val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
+ decoded_snoop_ltr = val * convert_ltr_scale(scale);
+ }
+
+ seq_printf(s, "%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n",
+ map[index].name, ltr_raw_data,
+ decoded_non_snoop_ltr,
+ decoded_snoop_ltr);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
+
+static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const char **lpm_modes = pmcdev->map->lpm_modes;
+ u32 offset = pmcdev->map->lpm_residency_offset;
+ u32 lpm_en;
+ int index;
+
+ lpm_en = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_en_offset);
+ seq_printf(s, "status substate residency\n");
+ for (index = 0; lpm_modes[index]; index++) {
+ seq_printf(s, "%7s %7s %-15u\n",
+ BIT(index) & lpm_en ? "Enabled" : " ",
+ lpm_modes[index], pmc_core_reg_read(pmcdev, offset));
+ offset += 4;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);
+
+static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
+ u32 offset = pmcdev->map->lpm_status_offset;
+
+ pmc_core_lpm_display(pmcdev, NULL, s, offset, "STATUS", maps);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);
+
+static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
+ u32 offset = pmcdev->map->lpm_live_status_offset;
+
+ pmc_core_lpm_display(pmcdev, NULL, s, offset, "LIVE_STATUS", maps);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
+
+static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map *map = pmcdev->map->msr_sts;
+ u64 pcstate_count;
+ int index;
+
+ for (index = 0; map[index].name ; index++) {
+ if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
+ continue;
+
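+		/*
+		 * These residency MSRs tick at the TSC rate, so
+		 * cycles * 1000 / tsc_khz converts the count to microseconds.
+		 */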
+ pcstate_count *= 1000;
+ do_div(pcstate_count, tsc_khz);
+ seq_printf(s, "%-8s : %llu\n", map[index].name,
+ pcstate_count);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
debugfs_remove_recursive(pmcdev->dbgfs_dir);
}
-static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
+static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
{
struct dentry *dir;
dir = debugfs_create_dir("pmc_core", NULL);
- if (!dir)
- return -ENOMEM;
-
pmcdev->dbgfs_dir = dir;
debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev,
&pmc_core_dev_state);
- debugfs_create_file("pch_ip_power_gating_status", 0444, dir, pmcdev,
- &pmc_core_ppfear_ops);
+ if (pmcdev->map->pfear_sts)
+ debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
+ pmcdev, &pmc_core_ppfear_fops);
debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
&pmc_core_ltr_ignore_ops);
+ debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);
+
+ debugfs_create_file("package_cstate_show", 0444, dir, pmcdev,
+ &pmc_core_pkgc_fops);
+
if (pmcdev->map->pll_sts)
debugfs_create_file("pll_status", 0444, dir, pmcdev,
- &pmc_core_pll_ops);
+ &pmc_core_pll_fops);
if (pmcdev->map->mphy_sts)
debugfs_create_file("mphy_core_lanes_power_gating_status",
0444, dir, pmcdev,
- &pmc_core_mphy_pg_ops);
+ &pmc_core_mphy_pg_fops);
if (pmcdev->map->slps0_dbg_maps) {
debugfs_create_file("slp_s0_debug_status", 0444,
@@ -635,41 +1140,100 @@
dir, &slps0_dbg_latch);
}
- return 0;
-}
-#else
-static inline int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
-{
- return 0;
-}
+ if (pmcdev->map->lpm_en_offset) {
+ debugfs_create_file("substate_residencies", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_substate_res_fops);
+ }
-static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
-{
+ if (pmcdev->map->lpm_status_offset) {
+ debugfs_create_file("substate_status_registers", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_substate_sts_regs_fops);
+ debugfs_create_file("substate_live_status_registers", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_substate_l_sts_regs_fops);
+ }
}
-#endif /* CONFIG_DEBUG_FS */
static const struct x86_cpu_id intel_pmc_core_ids[] = {
- ICPU(INTEL_FAM6_SKYLAKE_MOBILE, &spt_reg_map),
- ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, &spt_reg_map),
- ICPU(INTEL_FAM6_KABYLAKE_MOBILE, &spt_reg_map),
- ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, &spt_reg_map),
- ICPU(INTEL_FAM6_CANNONLAKE_MOBILE, &cnp_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &spt_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &spt_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &spt_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &spt_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &cnp_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &icl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &cnp_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &cnp_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &icl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &tgl_reg_map),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);
static const struct pci_device_id pmc_pci_ids[] = {
- { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), 0},
- { 0, },
+ { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
+ { }
};
-static int __init pmc_core_probe(void)
+/*
+ * This quirk can be used on those platforms where the platform BIOS
+ * enforces the 24 MHz crystal to shut down before the PMC can assert
+ * SLP_S0#.
+ */
+static bool xtal_ignore;
+static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
+ xtal_ignore = true;
+ return 0;
+}
+
+static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev)
+{
+ u32 value;
+
+ value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
+ /* 24MHz Crystal Shutdown Qualification Disable */
+ value |= SPT_PMC_VRIC1_XTALSDQDIS;
+ /* Low Voltage Mode Enable */
+ value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
+ pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
+}
+
+static const struct dmi_system_id pmc_core_dmi_table[] = {
+ {
+ .callback = quirk_xtal_ignore,
+ .ident = "HP Elite x2 1013 G3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
+ },
+ },
+ {}
+};
+
+static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev)
+{
+ dmi_check_system(pmc_core_dmi_table);
+
+ if (xtal_ignore)
+ pmc_core_xtal_ignore(pmcdev);
+}
+
+static int pmc_core_probe(struct platform_device *pdev)
+{
+ static bool device_initialized;
struct pmc_dev *pmcdev = &pmc;
const struct x86_cpu_id *cpu_id;
u64 slp_s0_addr;
- int err;
+
+ if (device_initialized)
+ return -ENODEV;
cpu_id = x86_match_cpu(intel_pmc_core_ids);
if (!cpu_id)
@@ -678,8 +1242,8 @@
pmcdev->map = (struct pmc_reg_map *)cpu_id->driver_data;
/*
- * Coffeelake has CPU ID of Kabylake and Cannonlake PCH. So here
- * Sunrisepoint PCH regmap can't be used. Use Cannonlake PCH regmap
+ * Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here
+ * Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap
* in this case.
*/
if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
@@ -700,29 +1264,145 @@
return -ENOMEM;
mutex_init(&pmcdev->lock);
+ platform_set_drvdata(pdev, pmcdev);
pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
+ pmc_core_do_dmi_quirks(pmcdev);
- err = pmc_core_dbgfs_register(pmcdev);
- if (err < 0) {
- pr_warn(" debugfs register failed.\n");
- iounmap(pmcdev->regbase);
- return err;
+ /*
+ * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
+ * a cable is attached. Tell the PMC to ignore it.
+ */
+ if (pmcdev->map == &tgl_reg_map) {
+ dev_dbg(&pdev->dev, "ignoring GBE LTR\n");
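+		/* GIGABIT_ETHERNET is index 3 in cnp_ltr_show_map, so ignore BIT(3) */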
+ pmc_core_send_ltr_ignore(3);
}
- pr_info(" initialized\n");
+ pmc_core_dbgfs_register(pmcdev);
+
+ device_initialized = true;
+ dev_info(&pdev->dev, " initialized\n");
+
return 0;
}
-module_init(pmc_core_probe)
-static void __exit pmc_core_remove(void)
+static int pmc_core_remove(struct platform_device *pdev)
{
- struct pmc_dev *pmcdev = &pmc;
+ struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
pmc_core_dbgfs_unregister(pmcdev);
+ platform_set_drvdata(pdev, NULL);
mutex_destroy(&pmcdev->lock);
iounmap(pmcdev->regbase);
+ return 0;
}
-module_exit(pmc_core_remove)
+
+static bool warn_on_s0ix_failures;
+module_param(warn_on_s0ix_failures, bool, 0644);
+MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");
+
+static __maybe_unused int pmc_core_suspend(struct device *dev)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+
+ pmcdev->check_counters = false;
+
+ /* No warnings on S0ix failures */
+ if (!warn_on_s0ix_failures)
+ return 0;
+
+	/* Check if the suspend will actually use S0ix */
+ if (pm_suspend_via_firmware())
+ return 0;
+
+ /* Save PC10 residency for checking later */
+ if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
+ return -EIO;
+
+ /* Save S0ix residency for checking later */
+ if (pmc_core_dev_state_get(pmcdev, &pmcdev->s0ix_counter))
+ return -EIO;
+
+ pmcdev->check_counters = true;
+ return 0;
+}
+
+static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
+{
+ u64 pc10_counter;
+
+ if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
+ return false;
+
+ if (pc10_counter == pmcdev->pc10_counter)
+ return true;
+
+ return false;
+}
+
+static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
+{
+ u64 s0ix_counter;
+
+ if (pmc_core_dev_state_get(pmcdev, &s0ix_counter))
+ return false;
+
+ if (s0ix_counter == pmcdev->s0ix_counter)
+ return true;
+
+ return false;
+}
+
+static __maybe_unused int pmc_core_resume(struct device *dev)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
+ int offset = pmcdev->map->lpm_status_offset;
+
+ if (!pmcdev->check_counters)
+ return 0;
+
+ if (!pmc_core_is_s0ix_failed(pmcdev))
+ return 0;
+
+ if (pmc_core_is_pc10_failed(pmcdev)) {
+ /* S0ix failed because of PC10 entry failure */
+ dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
+ pmcdev->pc10_counter);
+ return 0;
+ }
+
+	/* The really interesting case - S0ix failed - let's ask the PMC why. */
+ dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
+ pmcdev->s0ix_counter);
+ if (pmcdev->map->slps0_dbg_maps)
+ pmc_core_slps0_display(pmcdev, dev, NULL);
+ if (pmcdev->map->lpm_sts)
+ pmc_core_lpm_display(pmcdev, dev, NULL, offset, "STATUS", maps);
+
+ return 0;
+}
+
+static const struct dev_pm_ops pmc_core_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
+};
+
+static const struct acpi_device_id pmc_core_acpi_ids[] = {
+	{"INT33A1", 0}, /* _HID for Intel Power Engine, _CID PNP0D80 */
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, pmc_core_acpi_ids);
+
+static struct platform_driver pmc_core_driver = {
+ .driver = {
+ .name = "intel_pmc_core",
+ .acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
+ .pm = &pmc_core_pm_ops,
+ },
+ .probe = pmc_core_probe,
+ .remove = pmc_core_remove,
+};
+
+module_platform_driver(pmc_core_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel PMC Core Driver");
--
Gitblit v1.6.2