From 748e4f3d702def1a4bff191e0cf93b6a05340f01 Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Fri, 10 May 2024 07:41:34 +0000 Subject: [PATCH] add gpio led uart --- kernel/drivers/edac/ghes_edac.c | 459 ++++++++++++++++++++++++++++++++++++-------------------- 1 files changed, 295 insertions(+), 164 deletions(-) diff --git a/kernel/drivers/edac/ghes_edac.c b/kernel/drivers/edac/ghes_edac.c index 78c339d..df5897c 100644 --- a/kernel/drivers/edac/ghes_edac.c +++ b/kernel/drivers/edac/ghes_edac.c @@ -1,12 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * GHES/EDAC Linux driver * - * This file may be distributed under the terms of the GNU General Public - * License version 2. - * * Copyright (c) 2013 by Mauro Carvalho Chehab * - * Red Hat Inc. http://www.redhat.com + * Red Hat Inc. https://www.redhat.com */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -17,19 +15,34 @@ #include "edac_module.h" #include <ras/ras_event.h> -struct ghes_edac_pvt { - struct list_head list; - struct ghes *ghes; +struct ghes_pvt { struct mem_ctl_info *mci; /* Buffers for the error handling routine */ - char detail_location[240]; - char other_detail[160]; + char other_detail[400]; char msg[80]; }; -static atomic_t ghes_init = ATOMIC_INIT(0); -static struct ghes_edac_pvt *ghes_pvt; +static refcount_t ghes_refcount = REFCOUNT_INIT(0); + +/* + * Access to ghes_pvt must be protected by ghes_lock. The spinlock + * also provides the necessary (implicit) memory barrier for the SMP + * case to make the pointer visible on another CPU. + */ +static struct ghes_pvt *ghes_pvt; + +/* + * This driver's representation of the system hardware, as collected + * from DMI. + */ +struct ghes_hw_desc { + int num_dimms; + struct dimm_info *dimms; +} ghes_hw; + +/* GHES registration mutex */ +static DEFINE_MUTEX(ghes_reg_mutex); /* * Sync with other, potentially concurrent callers of @@ -41,6 +54,8 @@ /* "ghes_edac.force_load=1" skips the platform check */ static bool __read_mostly force_load; module_param(force_load, bool, 0); + +static bool system_scanned; /* Memory Device - Type 17 of SMBIOS spec */ struct memdev_dmi_entry { @@ -68,131 +83,170 @@ u16 conf_mem_clk_speed; } __attribute__((__packed__)); -struct ghes_edac_dimm_fill { - struct mem_ctl_info *mci; - unsigned count; -}; - -static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg) +static struct dimm_info *find_dimm_by_handle(struct mem_ctl_info *mci, u16 handle) { - int *num_dimm = arg; + struct dimm_info *dimm; - if (dh->type == DMI_ENTRY_MEM_DEVICE) - (*num_dimm)++; + mci_for_each_dimm(mci, dimm) { + if (dimm->smbios_handle == handle) + return dimm; + } + + return NULL; } -static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg) +static void dimm_setup_label(struct dimm_info *dimm, u16 handle) { - struct ghes_edac_dimm_fill *dimm_fill = arg; - struct mem_ctl_info *mci = dimm_fill->mci; + const char *bank = NULL, *device = NULL; - if (dh->type == DMI_ENTRY_MEM_DEVICE) { - struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh; - struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, - mci->n_layers, - dimm_fill->count, 0, 0); - u16 rdr_mask = BIT(7) | BIT(13); + dmi_memdev_name(handle, &bank, &device); - if (entry->size == 0xffff) { - pr_info("Can't get DIMM%i size\n", - dimm_fill->count); - dimm->nr_pages = MiB_TO_PAGES(32);/* Unknown */ - } else if (entry->size == 0x7fff) { - dimm->nr_pages = MiB_TO_PAGES(entry->extended_size); - } else { - if (entry->size & BIT(15)) - dimm->nr_pages = MiB_TO_PAGES((entry->size & 
0x7fff) << 10); - else - dimm->nr_pages = MiB_TO_PAGES(entry->size); - } + /* + * Set to a NULL string when both bank and device are zero. In this case, + * the label assigned by default will be preserved. + */ + snprintf(dimm->label, sizeof(dimm->label), "%s%s%s", + (bank && *bank) ? bank : "", + (bank && *bank && device && *device) ? " " : "", + (device && *device) ? device : ""); +} - switch (entry->memory_type) { - case 0x12: - if (entry->type_detail & BIT(13)) - dimm->mtype = MEM_RDDR; - else - dimm->mtype = MEM_DDR; - break; - case 0x13: - if (entry->type_detail & BIT(13)) - dimm->mtype = MEM_RDDR2; - else - dimm->mtype = MEM_DDR2; - break; - case 0x14: - dimm->mtype = MEM_FB_DDR2; - break; - case 0x18: - if (entry->type_detail & BIT(12)) - dimm->mtype = MEM_NVDIMM; - else if (entry->type_detail & BIT(13)) - dimm->mtype = MEM_RDDR3; - else - dimm->mtype = MEM_DDR3; - break; - case 0x1a: - if (entry->type_detail & BIT(12)) - dimm->mtype = MEM_NVDIMM; - else if (entry->type_detail & BIT(13)) - dimm->mtype = MEM_RDDR4; - else - dimm->mtype = MEM_DDR4; - break; - default: - if (entry->type_detail & BIT(6)) - dimm->mtype = MEM_RMBS; - else if ((entry->type_detail & rdr_mask) == rdr_mask) - dimm->mtype = MEM_RDR; - else if (entry->type_detail & BIT(7)) - dimm->mtype = MEM_SDR; - else if (entry->type_detail & BIT(9)) - dimm->mtype = MEM_EDO; - else - dimm->mtype = MEM_UNKNOWN; - } +static void assign_dmi_dimm_info(struct dimm_info *dimm, struct memdev_dmi_entry *entry) +{ + u16 rdr_mask = BIT(7) | BIT(13); - /* - * Actually, we can only detect if the memory has bits for - * checksum or not - */ - if (entry->total_width == entry->data_width) - dimm->edac_mode = EDAC_NONE; + if (entry->size == 0xffff) { + pr_info("Can't get DIMM%i size\n", dimm->idx); + dimm->nr_pages = MiB_TO_PAGES(32);/* Unknown */ + } else if (entry->size == 0x7fff) { + dimm->nr_pages = MiB_TO_PAGES(entry->extended_size); + } else { + if (entry->size & BIT(15)) + dimm->nr_pages = MiB_TO_PAGES((entry->size & 0x7fff) << 10); else - dimm->edac_mode = EDAC_SECDED; + dimm->nr_pages = MiB_TO_PAGES(entry->size); + } - dimm->dtype = DEV_UNKNOWN; - dimm->grain = 128; /* Likely, worse case */ + switch (entry->memory_type) { + case 0x12: + if (entry->type_detail & BIT(13)) + dimm->mtype = MEM_RDDR; + else + dimm->mtype = MEM_DDR; + break; + case 0x13: + if (entry->type_detail & BIT(13)) + dimm->mtype = MEM_RDDR2; + else + dimm->mtype = MEM_DDR2; + break; + case 0x14: + dimm->mtype = MEM_FB_DDR2; + break; + case 0x18: + if (entry->type_detail & BIT(12)) + dimm->mtype = MEM_NVDIMM; + else if (entry->type_detail & BIT(13)) + dimm->mtype = MEM_RDDR3; + else + dimm->mtype = MEM_DDR3; + break; + case 0x1a: + if (entry->type_detail & BIT(12)) + dimm->mtype = MEM_NVDIMM; + else if (entry->type_detail & BIT(13)) + dimm->mtype = MEM_RDDR4; + else + dimm->mtype = MEM_DDR4; + break; + default: + if (entry->type_detail & BIT(6)) + dimm->mtype = MEM_RMBS; + else if ((entry->type_detail & rdr_mask) == rdr_mask) + dimm->mtype = MEM_RDR; + else if (entry->type_detail & BIT(7)) + dimm->mtype = MEM_SDR; + else if (entry->type_detail & BIT(9)) + dimm->mtype = MEM_EDO; + else + dimm->mtype = MEM_UNKNOWN; + } - /* - * FIXME: It shouldn't be hard to also fill the DIMM labels - */ + /* + * Actually, we can only detect if the memory has bits for + * checksum or not + */ + if (entry->total_width == entry->data_width) + dimm->edac_mode = EDAC_NONE; + else + dimm->edac_mode = EDAC_SECDED; - if (dimm->nr_pages) { - edac_dbg(1, "DIMM%i: %s size = %d MB%s\n", - 
dimm_fill->count, edac_mem_types[dimm->mtype], - PAGES_TO_MiB(dimm->nr_pages), - (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : ""); - edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n", - entry->memory_type, entry->type_detail, - entry->total_width, entry->data_width); + dimm->dtype = DEV_UNKNOWN; + dimm->grain = 128; /* Likely, worse case */ + + dimm_setup_label(dimm, entry->handle); + + if (dimm->nr_pages) { + edac_dbg(1, "DIMM%i: %s size = %d MB%s\n", + dimm->idx, edac_mem_types[dimm->mtype], + PAGES_TO_MiB(dimm->nr_pages), + (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : ""); + edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n", + entry->memory_type, entry->type_detail, + entry->total_width, entry->data_width); + } + + dimm->smbios_handle = entry->handle; +} + +static void enumerate_dimms(const struct dmi_header *dh, void *arg) +{ + struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh; + struct ghes_hw_desc *hw = (struct ghes_hw_desc *)arg; + struct dimm_info *d; + + if (dh->type != DMI_ENTRY_MEM_DEVICE) + return; + + /* Enlarge the array with additional 16 */ + if (!hw->num_dimms || !(hw->num_dimms % 16)) { + struct dimm_info *new; + + new = krealloc(hw->dimms, (hw->num_dimms + 16) * sizeof(struct dimm_info), + GFP_KERNEL); + if (!new) { + WARN_ON_ONCE(1); + return; } - dimm_fill->count++; + hw->dimms = new; } + + d = &hw->dimms[hw->num_dimms]; + d->idx = hw->num_dimms; + + assign_dmi_dimm_info(d, entry); + + hw->num_dimms++; +} + +static void ghes_scan_system(void) +{ + if (system_scanned) + return; + + dmi_walk(enumerate_dimms, &ghes_hw); + + system_scanned = true; } void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) { - enum hw_event_mc_err_type type; struct edac_raw_error_desc *e; struct mem_ctl_info *mci; - struct ghes_edac_pvt *pvt = ghes_pvt; + struct ghes_pvt *pvt; unsigned long flags; char *p; - u8 grain_bits; - - if (!pvt) - return; /* * We can do the locking below because GHES defers error processing @@ -204,6 +258,10 @@ spin_lock_irqsave(&ghes_lock, flags); + pvt = ghes_pvt; + if (!pvt) + goto unlock; + mci = pvt->mci; e = &mci->error_desc; @@ -211,7 +269,6 @@ memset(e, 0, sizeof (*e)); e->error_count = 1; e->grain = 1; - strcpy(e->label, "unknown label"); e->msg = pvt->msg; e->other_detail = pvt->other_detail; e->top_layer = -1; @@ -222,17 +279,17 @@ switch (sev) { case GHES_SEV_CORRECTED: - type = HW_EVENT_ERR_CORRECTED; + e->type = HW_EVENT_ERR_CORRECTED; break; case GHES_SEV_RECOVERABLE: - type = HW_EVENT_ERR_UNCORRECTED; + e->type = HW_EVENT_ERR_UNCORRECTED; break; case GHES_SEV_PANIC: - type = HW_EVENT_ERR_FATAL; + e->type = HW_EVENT_ERR_FATAL; break; default: case GHES_SEV_NO: - type = HW_EVENT_ERR_INFO; + e->type = HW_EVENT_ERR_INFO; } edac_dbg(1, "error validation_bits: 0x%08llx\n", @@ -300,8 +357,8 @@ /* Error address */ if (mem_err->validation_bits & CPER_MEM_VALID_PA) { - e->page_frame_number = mem_err->physical_addr >> PAGE_SHIFT; - e->offset_in_page = mem_err->physical_addr & ~PAGE_MASK; + e->page_frame_number = PHYS_PFN(mem_err->physical_addr); + e->offset_in_page = offset_in_page(mem_err->physical_addr); } /* Error grain */ @@ -320,26 +377,52 @@ p += sprintf(p, "rank:%d ", mem_err->rank); if (mem_err->validation_bits & CPER_MEM_VALID_BANK) p += sprintf(p, "bank:%d ", mem_err->bank); - if (mem_err->validation_bits & CPER_MEM_VALID_ROW) - p += sprintf(p, "row:%d ", mem_err->row); + if (mem_err->validation_bits & CPER_MEM_VALID_BANK_GROUP) + p += sprintf(p, "bank_group:%d ", + mem_err->bank >> 
CPER_MEM_BANK_GROUP_SHIFT); + if (mem_err->validation_bits & CPER_MEM_VALID_BANK_ADDRESS) + p += sprintf(p, "bank_address:%d ", + mem_err->bank & CPER_MEM_BANK_ADDRESS_MASK); + if (mem_err->validation_bits & (CPER_MEM_VALID_ROW | CPER_MEM_VALID_ROW_EXT)) { + u32 row = mem_err->row; + + row |= cper_get_mem_extension(mem_err->validation_bits, mem_err->extended); + p += sprintf(p, "row:%d ", row); + } if (mem_err->validation_bits & CPER_MEM_VALID_COLUMN) p += sprintf(p, "col:%d ", mem_err->column); if (mem_err->validation_bits & CPER_MEM_VALID_BIT_POSITION) p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos); if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) { const char *bank = NULL, *device = NULL; + struct dimm_info *dimm; + dmi_memdev_name(mem_err->mem_dev_handle, &bank, &device); if (bank != NULL && device != NULL) p += sprintf(p, "DIMM location:%s %s ", bank, device); else p += sprintf(p, "DIMM DMI handle: 0x%.4x ", mem_err->mem_dev_handle); + + dimm = find_dimm_by_handle(mci, mem_err->mem_dev_handle); + if (dimm) { + e->top_layer = dimm->idx; + strcpy(e->label, dimm->label); + } } + if (mem_err->validation_bits & CPER_MEM_VALID_CHIP_ID) + p += sprintf(p, "chipID: %d ", + mem_err->extended >> CPER_MEM_CHIP_ID_SHIFT); if (p > e->location) *(p - 1) = '\0'; + if (!*e->label) + strcpy(e->label, "unknown memory"); + /* All other fields are mapped on e->other_detail */ p = pvt->other_detail; + p += snprintf(p, sizeof(pvt->other_detail), + "APEI location: %s ", e->location); if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_STATUS) { u64 status = mem_err->error_status; @@ -413,21 +496,9 @@ if (p > pvt->other_detail) *(p - 1) = '\0'; - /* Sanity-check driver-supplied grain value. */ - if (WARN_ON_ONCE(!e->grain)) - e->grain = 1; + edac_raw_mc_handle_error(e); - grain_bits = fls_long(e->grain - 1); - - /* Generate the trace event */ - snprintf(pvt->detail_location, sizeof(pvt->detail_location), - "APEI location: %s %s", e->location, e->other_detail); - trace_mc_event(type, e->msg, e->label, e->error_count, - mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer, - (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page, - grain_bits, e->syndrome, pvt->detail_location); - - edac_raw_mc_handle_error(type, mci, e); +unlock: spin_unlock_irqrestore(&ghes_lock, flags); } @@ -442,11 +513,12 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev) { bool fake = false; - int rc, num_dimm = 0; struct mem_ctl_info *mci; + struct ghes_pvt *pvt; struct edac_mc_layer layers[1]; - struct ghes_edac_dimm_fill dimm_fill; + unsigned long flags; int idx = -1; + int rc = 0; if (IS_ENABLED(CONFIG_X86)) { /* Check if safe to enable on this system */ @@ -454,37 +526,40 @@ if (!force_load && idx < 0) return -ENODEV; } else { + force_load = true; idx = 0; } + + /* finish another registration/unregistration instance first */ + mutex_lock(&ghes_reg_mutex); /* * We have only one logical memory controller to which all DIMMs belong. 
*/ - if (atomic_inc_return(&ghes_init) > 1) - return 0; + if (refcount_inc_not_zero(&ghes_refcount)) + goto unlock; - /* Get the number of DIMMs */ - dmi_walk(ghes_edac_count_dimms, &num_dimm); + ghes_scan_system(); /* Check if we've got a bogus BIOS */ - if (num_dimm == 0) { + if (!ghes_hw.num_dimms) { fake = true; - num_dimm = 1; + ghes_hw.num_dimms = 1; } layers[0].type = EDAC_MC_LAYER_ALL_MEM; - layers[0].size = num_dimm; + layers[0].size = ghes_hw.num_dimms; layers[0].is_virt_csrow = true; - mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt)); + mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_pvt)); if (!mci) { pr_info("Can't allocate memory for EDAC data\n"); - return -ENOMEM; + rc = -ENOMEM; + goto unlock; } - ghes_pvt = mci->pvt_info; - ghes_pvt->ghes = ghes; - ghes_pvt->mci = mci; + pvt = mci->pvt_info; + pvt->mci = mci; mci->pdev = dev; mci->mtype_cap = MEM_FLAG_EMPTY; @@ -504,16 +579,36 @@ pr_info("So, the end result of using this driver varies from vendor to vendor.\n"); pr_info("If you find incorrect reports, please contact your hardware vendor\n"); pr_info("to correct its BIOS.\n"); - pr_info("This system has %d DIMM sockets.\n", num_dimm); + pr_info("This system has %d DIMM sockets.\n", ghes_hw.num_dimms); } if (!fake) { - dimm_fill.count = 0; - dimm_fill.mci = mci; - dmi_walk(ghes_edac_dmidecode, &dimm_fill); + struct dimm_info *src, *dst; + int i = 0; + + mci_for_each_dimm(mci, dst) { + src = &ghes_hw.dimms[i]; + + dst->idx = src->idx; + dst->smbios_handle = src->smbios_handle; + dst->nr_pages = src->nr_pages; + dst->mtype = src->mtype; + dst->edac_mode = src->edac_mode; + dst->dtype = src->dtype; + dst->grain = src->grain; + + /* + * If no src->label, preserve default label assigned + * from EDAC core. + */ + if (strlen(src->label)) + memcpy(dst->label, src->label, sizeof(src->label)); + + i++; + } + } else { - struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, - mci->n_layers, 0, 0, 0); + struct dimm_info *dimm = edac_get_dimm(mci, 0, 0, 0); dimm->nr_pages = 1; dimm->grain = 128; @@ -524,25 +619,61 @@ rc = edac_mc_add_mc(mci); if (rc < 0) { - pr_info("Can't register at EDAC core\n"); + pr_info("Can't register with the EDAC core\n"); edac_mc_free(mci); - return -ENODEV; + rc = -ENODEV; + goto unlock; } - return 0; + + spin_lock_irqsave(&ghes_lock, flags); + ghes_pvt = pvt; + spin_unlock_irqrestore(&ghes_lock, flags); + + /* only set on success */ + refcount_set(&ghes_refcount, 1); + +unlock: + + /* Not needed anymore */ + kfree(ghes_hw.dimms); + ghes_hw.dimms = NULL; + + mutex_unlock(&ghes_reg_mutex); + + return rc; } void ghes_edac_unregister(struct ghes *ghes) { struct mem_ctl_info *mci; + unsigned long flags; - if (!ghes_pvt) + if (!force_load) return; - if (atomic_dec_return(&ghes_init)) - return; + mutex_lock(&ghes_reg_mutex); - mci = ghes_pvt->mci; + system_scanned = false; + memset(&ghes_hw, 0, sizeof(struct ghes_hw_desc)); + + if (!refcount_dec_and_test(&ghes_refcount)) + goto unlock; + + /* + * Wait for the irq handler being finished. + */ + spin_lock_irqsave(&ghes_lock, flags); + mci = ghes_pvt ? ghes_pvt->mci : NULL; ghes_pvt = NULL; - edac_mc_del_mc(mci->pdev); - edac_mc_free(mci); + spin_unlock_irqrestore(&ghes_lock, flags); + + if (!mci) + goto unlock; + + mci = edac_mc_del_mc(mci->pdev); + if (mci) + edac_mc_free(mci); + +unlock: + mutex_unlock(&ghes_reg_mutex); } -- Gitblit v1.6.2
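
The registration rework above hinges on three pieces of synchronization: ghes_reg_mutex serializes ghes_edac_register()/ghes_edac_unregister(), ghes_refcount keeps the shared state alive while several GHES sources are registered, and the error-reporting path dereferences ghes_pvt only under ghes_lock so teardown can wait out an in-flight handler. Below is a minimal userspace C sketch of that protocol, using pthreads; the names ctx_register(), ctx_unregister() and report() are illustrative only and do not exist in the kernel, and the sketch models the locking pattern rather than the EDAC API itself.

/*
 * Sketch of the registration/unregistration protocol introduced by the
 * patch, modeled in plain userspace C (assumed names, not kernel API).
 *
 *   reg_mutex  ~ ghes_reg_mutex   (serializes register/unregister)
 *   refcount   ~ ghes_refcount    (multiple GHES sources, one MC)
 *   ctx_lock   ~ ghes_lock        (protects the shared pointer)
 *   ctx        ~ ghes_pvt         (published only when fully set up)
 *
 * Build: cc -pthread sketch.c -o sketch
 */
#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx { int dummy; };

static pthread_mutex_t reg_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_spinlock_t ctx_lock;
static struct ctx *ctx;
static int refcount;

static int ctx_register(void)
{
	struct ctx *new;
	int rc = 0;

	pthread_mutex_lock(&reg_mutex);
	if (refcount) {			/* a prior caller already set things up */
		refcount++;
		goto unlock;
	}

	new = calloc(1, sizeof(*new));
	if (!new) {
		rc = -1;
		goto unlock;
	}

	pthread_spin_lock(&ctx_lock);	/* publish only after full init */
	ctx = new;
	pthread_spin_unlock(&ctx_lock);

	refcount = 1;			/* set only on success */
unlock:
	pthread_mutex_unlock(&reg_mutex);
	return rc;
}

static void ctx_unregister(void)
{
	struct ctx *old;

	pthread_mutex_lock(&reg_mutex);
	if (--refcount)			/* other users remain */
		goto unlock;

	pthread_spin_lock(&ctx_lock);	/* wait for in-flight consumers */
	old = ctx;
	ctx = NULL;
	pthread_spin_unlock(&ctx_lock);

	free(old);
unlock:
	pthread_mutex_unlock(&reg_mutex);
}

static void report(void)		/* consumer, like the error handler */
{
	pthread_spin_lock(&ctx_lock);
	if (ctx)
		printf("handled error with live context\n");
	pthread_spin_unlock(&ctx_lock);
}

int main(void)
{
	pthread_spin_init(&ctx_lock, PTHREAD_PROCESS_PRIVATE);
	if (ctx_register())
		return 1;
	report();			/* context present: handled */
	ctx_unregister();
	report();			/* context gone: silently skipped */
	pthread_spin_destroy(&ctx_lock);
	return 0;
}

As in the patch, the shared pointer is published only after the object is fully initialized and the refcount is set only once registration has succeeded, so a concurrent error report either sees a complete, valid context or none at all.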