| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-04 17:21:59 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-04 17:21:59 -0700 |
| commit | d782cebd6b39b4caab8a913180c0acfd6c33e9c2 (patch) | |
| tree | e8ed959e9475f57bf7f2a0753e5a0f7cf04c8f75 /drivers | |
| parent | 8556d44fee6ded9f4287d7ff7b5cc9d8635b0be0 (diff) | |
| parent | c3107e3c504d3187ed8eac8179494946faff1481 (diff) | |
Merge branch 'x86-ras-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RAS updates from Ingo Molnar:
"The main changes in this cycle are:
- RAS tracing/events infrastructure, by Gong Chen.
- Various generalizations of the APEI code to make it available to
non-x86 architectures, by Tomasz Nowicki"
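
The tracing side of this series only decodes records to the console when no userspace consumer is registered: the new drivers/ras/debugfs.c (in the diff below) exposes a ras/daemon_active debugfs file whose open count backs ras_userspace_consumers(), and acpi_extlog.c now emits trace_extlog_mem_event() instead of printing when that count is non-zero. A minimal sketch of a consumer follows; it assumes debugfs is mounted at /sys/kernel/debug, and the daemon name and event-reading loop are illustrative only:

```c
/*
 * Hypothetical RAS daemon sketch: holding daemon_active open makes
 * ras_userspace_consumers() return non-zero, so drivers such as
 * acpi_extlog route records to the ras trace events (extlog_mem_event,
 * mc_event) instead of decoding them to the console.
 * Assumes debugfs is mounted at /sys/kernel/debug.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* open() bumps the in-kernel trace_count via trace_open() */
	int fd = open("/sys/kernel/debug/ras/daemon_active", O_RDONLY);

	if (fd < 0) {
		perror("open daemon_active");
		return 1;
	}

	/* ... read the ras trace events from the tracing ring buffer here ... */
	pause();

	close(fd);	/* closing decrements the count again */
	return 0;
}
```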
* 'x86-ras-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/ras: Fix build warnings in <linux/aer.h>
acpi, apei, ghes: Factor out ioremap virtual memory for IRQ and NMI context.
acpi, apei, ghes: Make NMI error notification to be GHES architecture extension.
apei, mce: Factor out APEI architecture specific MCE calls.
RAS, extlog: Adjust init flow
trace, eMCA: Add a knob to adjust where to save event log
trace, RAS: Add eMCA trace event interface
RAS, debugfs: Add debugfs interface for RAS subsystem
CPER: Adjust code flow of some functions
x86, MCE: Robustify mcheck_init_device
trace, AER: Move trace into unified interface
trace, RAS: Add basic RAS trace event
x86, MCE: Kill CPU_POST_DEAD
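
The APEI generalization visible in the diff below replaces direct x86 MCE calls with architecture hooks: apei-base.c gains __weak defaults for arch_apei_enable_cmcff() and arch_apei_report_mem_error(), while hest.c and ghes.c call those (and arch_apei_flush_tlb_one() instead of __flush_tlb_one()). An architecture opts in by providing non-weak definitions. The sketch below shows such an override; the bodies are reconstructed from the x86-specific code removed from hest.c and ghes.c in this series, not the actual arch/x86 file (which is outside this drivers/-limited diffstat):

```c
/*
 * Illustrative architecture-side overrides of the new __weak APEI hooks.
 * Bodies are a sketch based on the x86 logic removed from
 * drivers/acpi/apei/ in this series.
 */
#include <linux/acpi.h>
#include <linux/cper.h>
#include <acpi/apei.h>
#include <asm/mce.h>

int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data)
{
	struct acpi_hest_ia_corrected *cmc;
	struct acpi_hest_ia_error_bank *mc_bank;
	int i;

	if (hest_hdr->type != ACPI_HEST_TYPE_IA32_CORRECTED_CHECK)
		return 0;

	cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
	if (!cmc->enabled)
		return 0;

	/* Only take over banks that firmware reports in firmware-first mode. */
	if (!(cmc->flags & ACPI_HEST_FIRMWARE_FIRST) || !cmc->num_hardware_banks)
		return 1;

	mc_bank = (struct acpi_hest_ia_error_bank *)(cmc + 1);
	for (i = 0; i < cmc->num_hardware_banks; i++, mc_bank++)
		mce_disable_bank(mc_bank->bank_number);

	return 1;
}

void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
{
	/* On x86 this hands the record to the MCE/eMCA path. */
	apei_mce_report_mem_error(sev, mem_err);
}
```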
Diffstat (limited to 'drivers')
| -rw-r--r-- | drivers/Kconfig | 2 |
| -rw-r--r-- | drivers/Makefile | 1 |
| -rw-r--r-- | drivers/acpi/Kconfig | 4 |
| -rw-r--r-- | drivers/acpi/acpi_extlog.c | 46 |
| -rw-r--r-- | drivers/acpi/apei/Kconfig | 8 |
| -rw-r--r-- | drivers/acpi/apei/apei-base.c | 13 |
| -rw-r--r-- | drivers/acpi/apei/ghes.c | 173 |
| -rw-r--r-- | drivers/acpi/apei/hest.c | 29 |
| -rw-r--r-- | drivers/edac/Kconfig | 1 |
| -rw-r--r-- | drivers/edac/edac_mc.c | 3 |
| -rw-r--r-- | drivers/firmware/efi/cper.c | 192 |
| -rw-r--r-- | drivers/pci/pcie/aer/Kconfig | 1 |
| -rw-r--r-- | drivers/pci/pcie/aer/aerdrv_errprint.c | 4 |
| -rw-r--r-- | drivers/ras/Kconfig | 2 |
| -rw-r--r-- | drivers/ras/Makefile | 1 |
| -rw-r--r-- | drivers/ras/debugfs.c | 56 |
| -rw-r--r-- | drivers/ras/ras.c | 29 |
17 files changed, 400 insertions, 165 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 0e87a34b6472..4e6e66c3c8d6 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -176,4 +176,6 @@ source "drivers/powercap/Kconfig"
 
 source "drivers/mcb/Kconfig"
 
+source "drivers/ras/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index f98b50d8251d..65c32b1cea3d 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -158,3 +158,4 @@ obj-$(CONFIG_NTB)	+= ntb/
 obj-$(CONFIG_FMC)	+= fmc/
 obj-$(CONFIG_POWERCAP)	+= powercap/
 obj-$(CONFIG_MCB)	+= mcb/
+obj-$(CONFIG_RAS)	+= ras/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index a34a22841002..206942b8d105 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -370,6 +370,7 @@ config ACPI_EXTLOG
 	tristate "Extended Error Log support"
 	depends on X86_MCE && X86_LOCAL_APIC
 	select UEFI_CPER
+	select RAS
 	default n
 	help
 	  Certain usages such as Predictive Failure Analysis (PFA) require
@@ -384,6 +385,7 @@ config ACPI_EXTLOG
 	  Enhanced MCA Logging allows firmware to provide additional error
 	  information to system software, synchronous with MCE or CMCI. This
-	  driver adds support for that functionality.
+	  driver adds support for that functionality with corresponding
+	  tracepoint which carries that information to userspace.
 
 endif	# ACPI
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index 185334114d71..0ad6f389d922 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -12,10 +12,12 @@
 #include <linux/cper.h>
 #include <linux/ratelimit.h>
 #include <linux/edac.h>
+#include <linux/ras.h>
 
 #include <asm/cpu.h>
 #include <asm/mce.h>
 
 #include "apei/apei-internal.h"
+#include <ras/ras_event.h>
 
 #define EXT_ELOG_ENTRY_MASK	GENMASK_ULL(51, 0) /* elog entry address mask */
@@ -137,8 +139,12 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
 	struct mce *mce = (struct mce *)data;
 	int	bank = mce->bank;
 	int	cpu = mce->extcpu;
-	struct acpi_generic_status *estatus;
-	int rc;
+	struct acpi_generic_status *estatus, *tmp;
+	struct acpi_generic_data *gdata;
+	const uuid_le *fru_id = &NULL_UUID_LE;
+	char *fru_text = "";
+	uuid_le *sec_type;
+	static u32 err_seq;
 
 	estatus = extlog_elog_entry_check(cpu, bank);
 	if (estatus == NULL)
@@ -148,8 +154,29 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
 	/* clear record status to enable BIOS to update it again */
 	estatus->block_status = 0;
 
-	rc = print_extlog_rcd(NULL, (struct acpi_generic_status *)elog_buf, cpu);
+	tmp = (struct acpi_generic_status *)elog_buf;
+
+	if (!ras_userspace_consumers()) {
+		print_extlog_rcd(NULL, tmp, cpu);
+		goto out;
+	}
+
+	/* log event via trace */
+	err_seq++;
+	gdata = (struct acpi_generic_data *)(tmp + 1);
+	if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+		fru_id = (uuid_le *)gdata->fru_id;
+	if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+		fru_text = gdata->fru_text;
+	sec_type = (uuid_le *)gdata->section_type;
+	if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
+		struct cper_sec_mem_err *mem = (void *)(gdata + 1);
+		if (gdata->error_data_length >= sizeof(*mem))
+			trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
+					       (u8)gdata->error_severity);
+	}
+out:
 
 	return NOTIFY_STOP;
 }
@@ -196,19 +223,16 @@ static int __init extlog_init(void)
 	u64 cap;
 	int rc;
 
+	rdmsrl(MSR_IA32_MCG_CAP, cap);
+
+	if (!(cap & MCG_ELOG_P) || !extlog_get_l1addr())
+		return -ENODEV;
+
 	if (get_edac_report_status() == EDAC_REPORTING_FORCE) {
 		pr_warn("Not loading eMCA, error reporting force-enabled through EDAC.\n");
 		return -EPERM;
 	}
 
-	rc = -ENODEV;
-	rdmsrl(MSR_IA32_MCG_CAP, cap);
-	if (!(cap & MCG_ELOG_P))
-		return rc;
-
-	if (!extlog_get_l1addr())
-		return rc;
-
 	rc = -EINVAL;
 	/* get L1 header to fetch necessary information */
 	l1_hdr_size = sizeof(struct extlog_l1_head);
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index c4dac7150960..b0140c8fc733 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -1,9 +1,15 @@
+config HAVE_ACPI_APEI
+	bool
+
+config HAVE_ACPI_APEI_NMI
+	bool
+
 config ACPI_APEI
 	bool "ACPI Platform Error Interface (APEI)"
 	select MISC_FILESYSTEMS
 	select PSTORE
 	select UEFI_CPER
-	depends on X86
+	depends on HAVE_ACPI_APEI
 	help
 	  APEI allows to report errors (for example from the chipset)
 	  to the operating system. This improves NMI handling
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 8678dfe5366b..2cd7bdd6c8b3 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -745,6 +745,19 @@ struct dentry *apei_get_debugfs_dir(void)
 }
 EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
 
+int __weak arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr,
+				  void *data)
+{
+	return 1;
+}
+EXPORT_SYMBOL_GPL(arch_apei_enable_cmcff);
+
+void __weak arch_apei_report_mem_error(int sev,
+				       struct cper_sec_mem_err *mem_err)
+{
+}
+EXPORT_SYMBOL_GPL(arch_apei_report_mem_error);
+
 int apei_osc_setup(void)
 {
 	static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index dab7cb7349df..e05d84e7b06d 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -47,11 +47,11 @@
 #include <linux/genalloc.h>
 #include <linux/pci.h>
 #include <linux/aer.h>
+#include <linux/nmi.h>
 
 #include <acpi/ghes.h>
-#include <asm/mce.h>
+#include <acpi/apei.h>
 #include <asm/tlbflush.h>
-#include <asm/nmi.h>
 
 #include "apei-internal.h"
@@ -86,8 +86,6 @@
 bool ghes_disable;
 module_param_named(disable, ghes_disable, bool, 0);
 
-static int ghes_panic_timeout	__read_mostly = 30;
-
 /*
  * All error sources notified with SCI shares one notifier function,
  * so they need to be linked and checked one by one. This is applied
@@ -97,16 +95,9 @@ static int ghes_panic_timeout	__read_mostly = 30;
  * list changing, not for traversing.
  */
 static LIST_HEAD(ghes_sci);
-static LIST_HEAD(ghes_nmi);
 static DEFINE_MUTEX(ghes_list_mutex);
 
 /*
- * NMI may be triggered on any CPU, so ghes_nmi_lock is used for
- * mutual exclusion.
- */
-static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
-
-/*
  * Because the memory area used to transfer hardware error information
  * from BIOS to Linux can be determined only in NMI, IRQ or timer
  * handler, but general ioremap can not be used in atomic context, so
@@ -114,12 +105,16 @@ static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
  */
 
 /*
- * Two virtual pages are used, one for NMI context, the other for
- * IRQ/PROCESS context
+ * Two virtual pages are used, one for IRQ/PROCESS context, the other for
+ * NMI context (optionally).
  */
-#define GHES_IOREMAP_PAGES 2
-#define GHES_IOREMAP_NMI_PAGE(base)	(base)
-#define GHES_IOREMAP_IRQ_PAGE(base)	((base) + PAGE_SIZE)
+#ifdef CONFIG_HAVE_ACPI_APEI_NMI
+#define GHES_IOREMAP_PAGES 2
+#else
+#define GHES_IOREMAP_PAGES 1
+#endif
+#define GHES_IOREMAP_IRQ_PAGE(base)	(base)
+#define GHES_IOREMAP_NMI_PAGE(base)	((base) + PAGE_SIZE)
 
 /* virtual memory area for atomic ioremap */
 static struct vm_struct *ghes_ioremap_area;
@@ -130,18 +125,8 @@ static struct vm_struct *ghes_ioremap_area;
 static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
 static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
 
-/*
- * printk is not safe in NMI context. So in NMI handler, we allocate
- * required memory from lock-less memory allocator
- * (ghes_estatus_pool), save estatus into it, put them into lock-less
- * list (ghes_estatus_llist), then delay printk into IRQ context via
- * irq_work (ghes_proc_irq_work). ghes_estatus_size_request record
- * required pool size by all NMI error source.
- */
 static struct gen_pool *ghes_estatus_pool;
 static unsigned long ghes_estatus_pool_size_request;
-static struct llist_head ghes_estatus_llist;
-static struct irq_work ghes_proc_irq_work;
 
 struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
 static atomic_t ghes_estatus_cache_alloced;
@@ -192,7 +177,7 @@ static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
 
 	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
 	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
-	__flush_tlb_one(vaddr);
+	arch_apei_flush_tlb_one(vaddr);
 }
 
 static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
@@ -202,7 +187,7 @@
 
 	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
 	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
-	__flush_tlb_one(vaddr);
+	arch_apei_flush_tlb_one(vaddr);
 }
 
 static int ghes_estatus_pool_init(void)
@@ -249,11 +234,6 @@ static int ghes_estatus_pool_expand(unsigned long len)
 	return 0;
 }
 
-static void ghes_estatus_pool_shrink(unsigned long len)
-{
-	ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
-}
-
 static struct ghes *ghes_new(struct acpi_hest_generic *generic)
 {
 	struct ghes *ghes;
@@ -455,9 +435,7 @@ static void ghes_do_proc(struct ghes *ghes,
 			mem_err = (struct cper_sec_mem_err *)(gdata+1);
 			ghes_edac_report_mem_error(ghes, sev, mem_err);
 
-#ifdef CONFIG_X86_MCE
-			apei_mce_report_mem_error(sev, mem_err);
-#endif
+			arch_apei_report_mem_error(sev, mem_err);
 			ghes_handle_memory_failure(gdata, sev);
 		}
 #ifdef CONFIG_ACPI_APEI_PCIEAER
@@ -734,6 +712,32 @@ static int ghes_notify_sci(struct notifier_block *this,
 	return ret;
 }
 
+static struct notifier_block ghes_notifier_sci = {
+	.notifier_call = ghes_notify_sci,
+};
+
+#ifdef CONFIG_HAVE_ACPI_APEI_NMI
+/*
+ * printk is not safe in NMI context. So in NMI handler, we allocate
+ * required memory from lock-less memory allocator
+ * (ghes_estatus_pool), save estatus into it, put them into lock-less
+ * list (ghes_estatus_llist), then delay printk into IRQ context via
+ * irq_work (ghes_proc_irq_work). ghes_estatus_size_request record
+ * required pool size by all NMI error source.
+ */
+static struct llist_head ghes_estatus_llist;
+static struct irq_work ghes_proc_irq_work;
+
+/*
+ * NMI may be triggered on any CPU, so ghes_nmi_lock is used for
+ * mutual exclusion.
+ */
+static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
+
+static LIST_HEAD(ghes_nmi);
+
+static int ghes_panic_timeout __read_mostly = 30;
+
 static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
 {
 	struct llist_node *next, *tail = NULL;
@@ -877,10 +881,6 @@ out:
 	return ret;
 }
 
-static struct notifier_block ghes_notifier_sci = {
-	.notifier_call = ghes_notify_sci,
-};
-
 static unsigned long ghes_esource_prealloc_size(
 	const struct acpi_hest_generic *generic)
 {
@@ -896,11 +896,71 @@ static unsigned long ghes_esource_prealloc_size(
 	return prealloc_size;
 }
 
+static void ghes_estatus_pool_shrink(unsigned long len)
+{
+	ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
+}
+
+static void ghes_nmi_add(struct ghes *ghes)
+{
+	unsigned long len;
+
+	len = ghes_esource_prealloc_size(ghes->generic);
+	ghes_estatus_pool_expand(len);
+	mutex_lock(&ghes_list_mutex);
+	if (list_empty(&ghes_nmi))
+		register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
+	list_add_rcu(&ghes->list, &ghes_nmi);
+	mutex_unlock(&ghes_list_mutex);
+}
+
+static void ghes_nmi_remove(struct ghes *ghes)
+{
+	unsigned long len;
+
+	mutex_lock(&ghes_list_mutex);
+	list_del_rcu(&ghes->list);
+	if (list_empty(&ghes_nmi))
+		unregister_nmi_handler(NMI_LOCAL, "ghes");
+	mutex_unlock(&ghes_list_mutex);
+	/*
+	 * To synchronize with NMI handler, ghes can only be
+	 * freed after NMI handler finishes.
+	 */
+	synchronize_rcu();
+	len = ghes_esource_prealloc_size(ghes->generic);
+	ghes_estatus_pool_shrink(len);
+}
+
+static void ghes_nmi_init_cxt(void)
+{
+	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
+}
+#else /* CONFIG_HAVE_ACPI_APEI_NMI */
+static inline void ghes_nmi_add(struct ghes *ghes)
+{
+	pr_err(GHES_PFX "ID: %d, trying to add NMI notification which is not supported!\n",
+	       ghes->generic->header.source_id);
+	BUG();
+}
+
+static inline void ghes_nmi_remove(struct ghes *ghes)
+{
+	pr_err(GHES_PFX "ID: %d, trying to remove NMI notification which is not supported!\n",
+	       ghes->generic->header.source_id);
+	BUG();
+}
+
+static inline void ghes_nmi_init_cxt(void)
+{
+}
+#endif /* CONFIG_HAVE_ACPI_APEI_NMI */
+
 static int ghes_probe(struct platform_device *ghes_dev)
 {
 	struct acpi_hest_generic *generic;
 	struct ghes *ghes = NULL;
-	unsigned long len;
+
 	int rc = -EINVAL;
 
 	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
@@ -911,7 +971,13 @@ static int ghes_probe(struct platform_device *ghes_dev)
 	case ACPI_HEST_NOTIFY_POLLED:
 	case ACPI_HEST_NOTIFY_EXTERNAL:
 	case ACPI_HEST_NOTIFY_SCI:
+		break;
 	case ACPI_HEST_NOTIFY_NMI:
+		if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
+			pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n",
+				generic->header.source_id);
+			goto err;
+		}
 		break;
 	case ACPI_HEST_NOTIFY_LOCAL:
 		pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
@@ -972,14 +1038,7 @@ static int ghes_probe(struct platform_device *ghes_dev)
 		mutex_unlock(&ghes_list_mutex);
 		break;
 	case ACPI_HEST_NOTIFY_NMI:
-		len = ghes_esource_prealloc_size(generic);
-		ghes_estatus_pool_expand(len);
-		mutex_lock(&ghes_list_mutex);
-		if (list_empty(&ghes_nmi))
-			register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0,
-					     "ghes");
-		list_add_rcu(&ghes->list, &ghes_nmi);
-		mutex_unlock(&ghes_list_mutex);
+		ghes_nmi_add(ghes);
 		break;
 	default:
 		BUG();
@@ -1001,7 +1060,6 @@ static int ghes_remove(struct platform_device *ghes_dev)
 {
 	struct ghes *ghes;
 	struct acpi_hest_generic *generic;
-	unsigned long len;
 
 	ghes = platform_get_drvdata(ghes_dev);
 	generic = ghes->generic;
@@ -1022,18 +1080,7 @@ static int ghes_remove(struct platform_device *ghes_dev)
 		mutex_unlock(&ghes_list_mutex);
 		break;
 	case ACPI_HEST_NOTIFY_NMI:
-		mutex_lock(&ghes_list_mutex);
-		list_del_rcu(&ghes->list);
-		if (list_empty(&ghes_nmi))
-			unregister_nmi_handler(NMI_LOCAL, "ghes");
-		mutex_unlock(&ghes_list_mutex);
-		/*
-		 * To synchronize with NMI handler, ghes can only be
-		 * freed after NMI handler finishes.
-		 */
-		synchronize_rcu();
-		len = ghes_esource_prealloc_size(generic);
-		ghes_estatus_pool_shrink(len);
+		ghes_nmi_remove(ghes);
 		break;
 	default:
 		BUG();
@@ -1077,7 +1124,7 @@ static int __init ghes_init(void)
 		return -EINVAL;
 	}
 
-	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
+	ghes_nmi_init_cxt();
 
 	rc = ghes_ioremap_init();
 	if (rc)
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index f5e37f32c71f..06e9b411a0a2 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -36,7 +36,6 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <acpi/apei.h>
-#include <asm/mce.h>
 
 #include "apei-internal.h"
@@ -128,33 +127,7 @@ EXPORT_SYMBOL_GPL(apei_hest_parse);
  */
 static int __init hest_parse_cmc(struct acpi_hest_header *hest_hdr, void *data)
 {
-#ifdef CONFIG_X86_MCE
-	int i;
-	struct acpi_hest_ia_corrected *cmc;
-	struct acpi_hest_ia_error_bank *mc_bank;
-
-	if (hest_hdr->type != ACPI_HEST_TYPE_IA32_CORRECTED_CHECK)
-		return 0;
-
-	cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
-	if (!cmc->enabled)
-		return 0;
-
-	/*
-	 * We expect HEST to provide a list of MC banks that report errors
-	 * in firmware first mode. Otherwise, return non-zero value to
-	 * indicate that we are done parsing HEST.
-	 */
-	if (!(cmc->flags & ACPI_HEST_FIRMWARE_FIRST) || !cmc->num_hardware_banks)
-		return 1;
-
-	pr_info(HEST_PFX "Enabling Firmware First mode for corrected errors.\n");
-
-	mc_bank = (struct acpi_hest_ia_error_bank *)(cmc + 1);
-	for (i = 0; i < cmc->num_hardware_banks; i++, mc_bank++)
-		mce_disable_bank(mc_bank->bank_number);
-#endif
-	return 1;
+	return arch_apei_enable_cmcff(hest_hdr, data);
 }
 
 struct ghes_arr {
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index e339c6b91425..f8665f9c3e03 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -72,6 +72,7 @@ config EDAC_MCE_INJ
 
 config EDAC_MM_EDAC
 	tristate "Main Memory EDAC (Error Detection And Correction) reporting"
+	select RAS
 	help
 	  Some systems are able to detect and correct errors in main
 	  memory. EDAC can report statistics on memory error
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 2c694b5297cc..9f134823fa75 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -33,9 +33,6 @@
 #include <asm/edac.h>
 #include "edac_core.h"
 #include "edac_module.h"
-
-#define CREATE_TRACE_POINTS
-#define TRACE_INCLUDE_PATH ../../include/ras
 #include <ras/ras_event.h>
 
 /* lock to memory controller's control array */
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 1491dd4f08f9..437e6fd47311 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -34,6 +34,9 @@
 #include <linux/aer.h>
 
 #define INDENT_SP	" "
+
+static char rcd_decode_str[CPER_REC_LEN];
+
 /*
  * CPER record ID need to be unique even after reboot, because record
  * ID is used as index for ERST storage, while CPER records from
@@ -50,18 +53,19 @@ u64 cper_next_record_id(void)
 }
 EXPORT_SYMBOL_GPL(cper_next_record_id);
 
-static const char *cper_severity_strs[] = {
+static const char * const severity_strs[] = {
 	"recoverable",
 	"fatal",
 	"corrected",
 	"info",
 };
 
-static const char *cper_severity_str(unsigned int severity)
+const char *cper_severity_str(unsigned int severity)
 {
-	return severity < ARRAY_SIZE(cper_severity_strs) ?
-		cper_severity_strs[severity] : "unknown";
+	return severity < ARRAY_SIZE(severity_strs) ?
+		severity_strs[severity] : "unknown";
 }
+EXPORT_SYMBOL_GPL(cper_severity_str);
 
 /*
  * cper_print_bits - print strings for set bits
@@ -100,32 +104,32 @@ void cper_print_bits(const char *pfx, unsigned int bits,
 	printk("%s\n", buf);
 }
 
-static const char * const cper_proc_type_strs[] = {
+static const char * const proc_type_strs[] = {
 	"IA32/X64",
 	"IA64",
 };
 
-static const char * const cper_proc_isa_strs[] = {
+static const char * const proc_isa_strs[] = {
 	"IA32",
 	"IA64",
 	"X64",
 };
 
-static const char * const cper_proc_error_type_strs[] = {
+static const char * const proc_error_type_strs[] = {
 	"cache error",
 	"TLB error",
 	"bus error",
 	"micro-architectural error",
 };
 
-static const char * const cper_proc_op_strs[] = {
+static const char * const proc_op_strs[] = {
 	"unknown or generic",
 	"data read",
 	"data write",
 	"instruction execution",
 };
 
-static const char * const cper_proc_flag_strs[] = {
+static const char * const proc_flag_strs[] = {
 	"restartable",
 	"precise IP",
 	"overflow",
@@ -137,26 +141,26 @@ static void cper_print_proc_generic(const char *pfx,
 {
 	if (proc->validation_bits & CPER_PROC_VALID_TYPE)
 		printk("%s""processor_type: %d, %s\n", pfx, proc->proc_type,
-		       proc->proc_type < ARRAY_SIZE(cper_proc_type_strs) ?
-		       cper_proc_type_strs[proc->proc_type] : "unknown");
+		       proc->proc_type < ARRAY_SIZE(proc_type_strs) ?
+		       proc_type_strs[proc->proc_type] : "unknown");
 	if (proc->validation_bits & CPER_PROC_VALID_ISA)
 		printk("%s""processor_isa: %d, %s\n", pfx, proc->proc_isa,
-		       proc->proc_isa < ARRAY_SIZE(cper_proc_isa_strs) ?
-		       cper_proc_isa_strs[proc->proc_isa] : "unknown");
+		       proc->proc_isa < ARRAY_SIZE(proc_isa_strs) ?
+		       proc_isa_strs[proc->proc_isa] : "unknown");
 	if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) {
 		printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type);
 		cper_print_bits(pfx, proc->proc_error_type,
-				cper_proc_error_type_strs,
-				ARRAY_SIZE(cper_proc_error_type_strs));
+				proc_error_type_strs,
+				ARRAY_SIZE(proc_error_type_strs));
 	}
 	if (proc->validation_bits & CPER_PROC_VALID_OPERATION)
 		printk("%s""operation: %d, %s\n", pfx, proc->operation,
-		       proc->operation < ARRAY_SIZE(cper_proc_op_strs) ?
-		       cper_proc_op_strs[proc->operation] : "unknown");
+		       proc->operation < ARRAY_SIZE(proc_op_strs) ?
+		       proc_op_strs[proc->operation] : "unknown");
 	if (proc->validation_bits & CPER_PROC_VALID_FLAGS) {
 		printk("%s""flags: 0x%02x\n", pfx, proc->flags);
-		cper_print_bits(pfx, proc->flags, cper_proc_flag_strs,
-				ARRAY_SIZE(cper_proc_flag_strs));
+		cper_print_bits(pfx, proc->flags, proc_flag_strs,
+				ARRAY_SIZE(proc_flag_strs));
 	}
 	if (proc->validation_bits & CPER_PROC_VALID_LEVEL)
 		printk("%s""level: %d\n", pfx, proc->level);
@@ -177,7 +181,7 @@ static void cper_print_proc_generic(const char *pfx,
 		printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
 }
 
-static const char *cper_mem_err_type_strs[] = {
+static const char * const mem_err_type_strs[] = {
 	"unknown",
 	"no error",
 	"single-bit ECC",
@@ -196,58 +200,136 @@ static const char *cper_mem_err_type_strs[] = {
 	"physical memory map-out event",
 };
 
-static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
+const char *cper_mem_err_type_str(unsigned int etype)
 {
-	if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
-		printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
-	if (mem->validation_bits & CPER_MEM_VALID_PA)
-		printk("%s""physical_address: 0x%016llx\n",
-		       pfx, mem->physical_addr);
-	if (mem->validation_bits & CPER_MEM_VALID_PA_MASK)
-		printk("%s""physical_address_mask: 0x%016llx\n",
-		       pfx, mem->physical_addr_mask);
+	return etype < ARRAY_SIZE(mem_err_type_strs) ?
+		mem_err_type_strs[etype] : "unknown";
+}
+EXPORT_SYMBOL_GPL(cper_mem_err_type_str);
+
+static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg)
+{
+	u32 len, n;
+
+	if (!msg)
+		return 0;
+
+	n = 0;
+	len = CPER_REC_LEN - 1;
 	if (mem->validation_bits & CPER_MEM_VALID_NODE)
-		pr_debug("node: %d\n", mem->node);
+		n += scnprintf(msg + n, len - n, "node: %d ", mem->node);
 	if (mem->validation_bits & CPER_MEM_VALID_CARD)
-		pr_debug("card: %d\n", mem->card);
+		n += scnprintf(msg + n, len - n, "card: %d ", mem->card);
 	if (mem->validation_bits & CPER_MEM_VALID_MODULE)
-		pr_debug("module: %d\n", mem->module);
+		n += scnprintf(msg + n, len - n, "module: %d ", mem->module);
 	if (mem->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
-		pr_debug("rank: %d\n", mem->rank);
+		n += scnprintf(msg + n, len - n, "rank: %d ", mem->rank);
 	if (mem->validation_bits & CPER_MEM_VALID_BANK)
-		pr_debug("bank: %d\n", mem->bank);
+		n += scnprintf(msg + n, len - n, "bank: %d ", mem->bank);
 	if (mem->validation_bits & CPER_MEM_VALID_DEVICE)
-		pr_debug("device: %d\n", mem->device);
+		n += scnprintf(msg + n, len - n, "device: %d ", mem->device);
 	if (mem->validation_bits & CPER_MEM_VALID_ROW)
-		pr_debug("row: %d\n", mem->row);
+		n += scnprintf(msg + n, len - n, "row: %d ", mem->row);
 	if (mem->validation_bits & CPER_MEM_VALID_COLUMN)
-		pr_debug("column: %d\n", mem->column);
+		n += scnprintf(msg + n, len - n, "column: %d ", mem->column);
 	if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION)
-		pr_debug("bit_position: %d\n", mem->bit_pos);
+		n += scnprintf(msg + n, len - n, "bit_position: %d ",
+			       mem->bit_pos);
 	if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
-		pr_debug("requestor_id: 0x%016llx\n", mem->requestor_id);
+		n += scnprintf(msg + n, len - n, "requestor_id: 0x%016llx ",
+			       mem->requestor_id);
 	if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
-		pr_debug("responder_id: 0x%016llx\n", mem->responder_id);
+		n += scnprintf(msg + n, len - n, "responder_id: 0x%016llx ",
+			       mem->responder_id);
 	if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID)
-		pr_debug("target_id: 0x%016llx\n", mem->target_id);
+		scnprintf(msg + n, len - n, "target_id: 0x%016llx ",
+			  mem->target_id);
+
+	msg[n] = '\0';
+	return n;
+}
+
+static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
+{
+	u32 len, n;
+	const char *bank = NULL, *device = NULL;
+
+	if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
+		return 0;
+
+	n = 0;
+	len = CPER_REC_LEN - 1;
+	dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
+	if (bank && device)
+		n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
+	else
+		n = snprintf(msg, len,
+			     "DIMM location: not present. DMI handle: 0x%.4x ",
+			     mem->mem_dev_handle);
+
+	msg[n] = '\0';
+	return n;
+}
+
+void cper_mem_err_pack(const struct cper_sec_mem_err *mem,
+		       struct cper_mem_err_compact *cmem)
+{
+	cmem->validation_bits = mem->validation_bits;
+	cmem->node = mem->node;
+	cmem->card = mem->card;
+	cmem->module = mem->module;
+	cmem->bank = mem->bank;
+	cmem->device = mem->device;
+	cmem->row = mem->row;
+	cmem->column = mem->column;
+	cmem->bit_pos = mem->bit_pos;
+	cmem->requestor_id = mem->requestor_id;
+	cmem->responder_id = mem->responder_id;
+	cmem->target_id = mem->target_id;
+	cmem->rank = mem->rank;
+	cmem->mem_array_handle = mem->mem_array_handle;
+	cmem->mem_dev_handle = mem->mem_dev_handle;
+}
+
+const char *cper_mem_err_unpack(struct trace_seq *p,
+				struct cper_mem_err_compact *cmem)
+{
+	const char *ret = p->buffer + p->len;
+
+	if (cper_mem_err_location(cmem, rcd_decode_str))
+		trace_seq_printf(p, "%s", rcd_decode_str);
+	if (cper_dimm_err_location(cmem, rcd_decode_str))
+		trace_seq_printf(p, "%s", rcd_decode_str);
+	trace_seq_putc(p, '\0');
+
+	return ret;
+}
+
+static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
+{
+	struct cper_mem_err_compact cmem;
+
+	if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
+		printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
+	if (mem->validation_bits & CPER_MEM_VALID_PA)
+		printk("%s""physical_address: 0x%016llx\n",
+		       pfx, mem->physical_addr);
+	if (mem->validation_bits & CPER_MEM_VALID_PA_MASK)
+		printk("%s""physical_address_mask: 0x%016llx\n",
+		       pfx, mem->physical_addr_mask);
+	cper_mem_err_pack(mem, &cmem);
+	if (cper_mem_err_location(&cmem, rcd_decode_str))
+		printk("%s%s\n", pfx, rcd_decode_str);
 	if (mem->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
 		u8 etype = mem->error_type;
 		printk("%s""error_type: %d, %s\n", pfx, etype,
-		       etype < ARRAY_SIZE(cper_mem_err_type_strs) ?
-		       cper_mem_err_type_strs[etype] : "unknown");
-	}
-	if (mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
-		const char *bank = NULL, *device = NULL;
-		dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
-		if (bank != NULL && device != NULL)
-			printk("%s""DIMM location: %s %s", pfx, bank, device);
-		else
-			printk("%s""DIMM DMI handle: 0x%.4x",
-			       pfx, mem->mem_dev_handle);
+		       cper_mem_err_type_str(etype));
 	}
+	if (cper_dimm_err_location(&cmem, rcd_decode_str))
+		printk("%s%s\n", pfx, rcd_decode_str);
 }
 
-static const char *cper_pcie_port_type_strs[] = {
+static const char * const pcie_port_type_strs[] = {
 	"PCIe end point",
 	"legacy PCI end point",
 	"unknown",
@@ -266,8 +348,8 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
 {
 	if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
 		printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
-		       pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ?
-		       cper_pcie_port_type_strs[pcie->port_type] : "unknown");
+		       pcie->port_type < ARRAY_SIZE(pcie_port_type_strs) ?
+		       pcie_port_type_strs[pcie->port_type] : "unknown");
 	if (pcie->validation_bits & CPER_PCIE_VALID_VERSION)
 		printk("%s""version: %d.%d\n", pfx,
 		       pcie->version.major, pcie->version.minor);
diff --git a/drivers/pci/pcie/aer/Kconfig b/drivers/pci/pcie/aer/Kconfig
index 50e94e02378a..389440228c1d 100644
--- a/drivers/pci/pcie/aer/Kconfig
+++ b/drivers/pci/pcie/aer/Kconfig
@@ -5,6 +5,7 @@
 config PCIEAER
 	boolean "Root Port Advanced Error Reporting support"
 	depends on PCIEPORTBUS
+	select RAS
 	default y
 	help
 	  This enables PCI Express Root Port Advanced Error Reporting
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 36ed31b52198..35d06e177917 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -22,9 +22,7 @@
 #include <linux/cper.h>
 
 #include "aerdrv.h"
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/ras.h>
+#include <ras/ras_event.h>
 
 #define AER_AGENT_RECEIVER		0
 #define AER_AGENT_REQUESTER		1
diff --git a/drivers/ras/Kconfig b/drivers/ras/Kconfig
new file mode 100644
index 000000000000..f9da613052c2
--- /dev/null
+++ b/drivers/ras/Kconfig
@@ -0,0 +1,2 @@
+config RAS
+	bool
diff --git a/drivers/ras/Makefile b/drivers/ras/Makefile
new file mode 100644
index 000000000000..d7f73341ced3
--- /dev/null
+++ b/drivers/ras/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_RAS) += ras.o debugfs.o
diff --git a/drivers/ras/debugfs.c b/drivers/ras/debugfs.c
new file mode 100644
index 000000000000..0322acf67ea5
--- /dev/null
+++ b/drivers/ras/debugfs.c
@@ -0,0 +1,56 @@
+#include <linux/debugfs.h>
+
+static struct dentry *ras_debugfs_dir;
+
+static atomic_t trace_count = ATOMIC_INIT(0);
+
+int ras_userspace_consumers(void)
+{
+	return atomic_read(&trace_count);
+}
+EXPORT_SYMBOL_GPL(ras_userspace_consumers);
+
+static int trace_show(struct seq_file *m, void *v)
+{
+	return atomic_read(&trace_count);
+}
+
+static int trace_open(struct inode *inode, struct file *file)
+{
+	atomic_inc(&trace_count);
+	return single_open(file, trace_show, NULL);
+}
+
+static int trace_release(struct inode *inode, struct file *file)
+{
+	atomic_dec(&trace_count);
+	return single_release(inode, file);
+}
+
+static const struct file_operations trace_fops = {
+	.open    = trace_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = trace_release,
+};
+
+int __init ras_add_daemon_trace(void)
+{
+	struct dentry *fentry;
+
+	if (!ras_debugfs_dir)
+		return -ENOENT;
+
+	fentry = debugfs_create_file("daemon_active", S_IRUSR, ras_debugfs_dir,
+				     NULL, &trace_fops);
+	if (!fentry)
+		return -ENODEV;
+
+	return 0;
+
+}
+
+void __init ras_debugfs_init(void)
+{
+	ras_debugfs_dir = debugfs_create_dir("ras", NULL);
+}
diff --git a/drivers/ras/ras.c b/drivers/ras/ras.c
new file mode 100644
index 000000000000..b67dd362b7b6
--- /dev/null
+++ b/drivers/ras/ras.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * Authors:
+ *	Chen, Gong <gong.chen@linux.intel.com>
+ */
+
+#include <linux/init.h>
+#include <linux/ras.h>
+
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH ../../include/ras
+#include <ras/ras_event.h>
+
+static int __init ras_init(void)
+{
+	int rc = 0;
+
+	ras_debugfs_init();
+	rc = ras_add_daemon_trace();
+
+	return rc;
+}
+subsys_initcall(ras_init);
+
+#if defined(CONFIG_ACPI_EXTLOG) || defined(CONFIG_ACPI_EXTLOG_MODULE)
+EXPORT_TRACEPOINT_SYMBOL_GPL(extlog_mem_event);
+#endif
+EXPORT_TRACEPOINT_SYMBOL_GPL(mc_event);