From b987ffc18fb3b3b76b059aa9e372dbee26f7c4f2 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 2 Nov 2018 14:26:53 +0100
Subject: x86/qspinlock: Fix compile error

With a compiler that has asm-goto but not asm-cc-output and
CONFIG_PROFILE_ALL_BRANCHES=y we get a compiler error:

  arch/x86/include/asm/rmwcc.h:23:17: error: jump into statement expression

Fix this by writing the if() as a boolean multiplication instead.

Reported-by: kbuild test robot
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Will Deacon
Cc: linux-kernel@vger.kernel.org
Fixes: 7aa54be29765 ("locking/qspinlock, x86: Provide liveness guarantee")
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/qspinlock.h | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 87623c6b13db..bd5ac6cc37db 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -13,12 +13,15 @@
 #define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
 static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
 {
-	u32 val = 0;
-
-	if (GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
-			     "I", _Q_PENDING_OFFSET))
-		val |= _Q_PENDING_VAL;
+	u32 val;
 
+	/*
+	 * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
+	 * and CONFIG_PROFILE_ALL_BRANCHES=y results in a label inside a
+	 * statement expression, which GCC doesn't like.
+	 */
+	val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
+			       "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
 	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
 
 	return val;
--
cgit v1.2.3
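The boolean-multiplication rewrite is easy to check outside the kernel. Below is a minimal userspace sketch of the same transformation, with a plain C stand-in for the GEN_BINARY_RMWcc() asm (the helper name and bit values are illustrative, not from the patch):

	#include <stdio.h>

	#define _Q_PENDING_VAL   0x100U
	#define _Q_PENDING_MASK  0xff00U

	/* Stand-in for the asm-goto bit-test-and-set; like
	 * GEN_BINARY_RMWcc() it evaluates to 0 or 1. */
	static int test_and_set_pending(unsigned int *counter)
	{
		int was_set = !!(*counter & _Q_PENDING_VAL);

		*counter |= _Q_PENDING_VAL;
		return was_set;
	}

	static unsigned int fetch_set_pending_acquire(unsigned int *counter)
	{
		unsigned int val;

		/*
		 * Before: if (test_and_set_pending(counter)) val |= _Q_PENDING_VAL;
		 * After:  multiply the 0/1 result instead, so the asm-goto labels
		 * never land inside an if() statement expression.
		 */
		val = test_and_set_pending(counter) * _Q_PENDING_VAL;
		val |= *counter & ~_Q_PENDING_MASK;
		return val;
	}

	int main(void)
	{
		unsigned int lock = 0;

		printf("first:  %#x\n", fetch_set_pending_acquire(&lock)); /* 0: pending was clear */
		printf("second: %#x\n", fetch_set_pending_acquire(&lock)); /* 0x100: pending was set */
		return 0;
	}

Both forms return the same value; the multiplication merely keeps the condition out of an if(), which is exactly what the compiler error required.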
From b42967dcac1d4f5b059ec25568136462bcb051fe Mon Sep 17 00:00:00 2001
From: Yi Wang
Date: Mon, 29 Oct 2018 15:17:31 +0800
Subject: x86/hyper-v: Fix indentation in hv_do_fast_hypercall16()

Remove the surplus TAB in hv_do_fast_hypercall16().

Signed-off-by: Yi Wang
Signed-off-by: Thomas Gleixner
Cc: kys@microsoft.com
Cc: haiyangz@microsoft.com
Cc: sthemmin@microsoft.com
Cc: bp@alien8.de
Cc: hpa@zytor.com
Cc: devel@linuxdriverproject.org
Cc: zhong.weidong@zte.com.cn
Link: https://lkml.kernel.org/r/1540797451-2792-1-git-send-email-wang.yi59@zte.com.cn
---
 arch/x86/include/asm/mshyperv.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 0d6271cce198..1d0a7778e163 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -232,7 +232,7 @@ static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
			     : "cc");
	}
 #endif
-		return hv_status;
+	return hv_status;
 }
 
 /*
--
cgit v1.2.3

From 5d96c9342c23ee1d084802dcf064caa67ecaa45b Mon Sep 17 00:00:00 2001
From: Vishal Verma
Date: Thu, 25 Oct 2018 18:37:28 -0600
Subject: acpi/nfit, x86/mce: Handle only uncorrectable machine checks

The MCE handler for nfit devices is called for memory errors on a
Non-Volatile DIMM and adds the error location to a 'badblocks' list. This
list is used by the various NVDIMM drivers to avoid consuming known
poison locations during IO.

The MCE handler gets called for both corrected and uncorrectable errors.
Until now, both kinds of errors have been added to the badblocks list.
However, corrected memory errors indicate that the problem has already
been fixed by hardware, and the resulting interrupt is merely a
notification to Linux. As far as future accesses to that location are
concerned, it is perfectly fine to use, and thus doesn't need to be
included in the above badblocks list.

Add a check in the nfit MCE handler to filter out corrected mce events,
and only process uncorrectable errors.

Fixes: 6839a6d96f4e ("nfit: do an ARS scrub on hitting a latent media error")
Reported-by: Omar Avelar
Signed-off-by: Vishal Verma
Signed-off-by: Borislav Petkov
CC: Arnd Bergmann
CC: Dan Williams
CC: Dave Jiang
CC: elliott@hpe.com
CC: "H. Peter Anvin"
CC: Ingo Molnar
CC: Len Brown
CC: linux-acpi@vger.kernel.org
CC: linux-edac
CC: linux-nvdimm@lists.01.org
CC: Qiuxu Zhuo
CC: "Rafael J. Wysocki"
CC: Ross Zwisler
CC: stable
CC: Thomas Gleixner
CC: Tony Luck
CC: x86-ml
CC: Yazen Ghannam
Link: http://lkml.kernel.org/r/20181026003729.8420-1-vishal.l.verma@intel.com
---
 arch/x86/include/asm/mce.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 4da9b1c58d28..dbd9fe2f6163 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -221,6 +221,7 @@ static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_am
 
 int mce_available(struct cpuinfo_x86 *c);
 bool mce_is_memory_error(struct mce *m);
+bool mce_is_correctable(struct mce *m);
 
 DECLARE_PER_CPU(unsigned, mce_exception_count);
 DECLARE_PER_CPU(unsigned, mce_poll_count);
--
cgit v1.2.3

From e8a308e5f47e545e0d41d0686c00f5f5217c5f61 Mon Sep 17 00:00:00 2001
From: Vishal Verma
Date: Thu, 25 Oct 2018 18:37:29 -0600
Subject: acpi/nfit, x86/mce: Validate a MCE's address before using it

The NFIT machine check handler uses the physical address from the mce
structure, and compares it against information in the ACPI NFIT table to
determine whether that location lies on an NVDIMM. The mce->addr field
however may not always be valid, and this is indicated by the
MCI_STATUS_ADDRV bit in the status field.

Export mce_usable_address() which already performs validation for the
address, and use it in the NFIT handler.

Fixes: 6839a6d96f4e ("nfit: do an ARS scrub on hitting a latent media error")
Reported-by: Robert Elliott
Signed-off-by: Vishal Verma
Signed-off-by: Borislav Petkov
CC: Arnd Bergmann
Cc: Dan Williams
CC: Dave Jiang
CC: elliott@hpe.com
CC: "H. Peter Anvin"
CC: Ingo Molnar
CC: Len Brown
CC: linux-acpi@vger.kernel.org
CC: linux-edac
CC: linux-nvdimm@lists.01.org
CC: Qiuxu Zhuo
CC: "Rafael J. Wysocki"
CC: Ross Zwisler
CC: stable
CC: Thomas Gleixner
CC: Tony Luck
CC: x86-ml
CC: Yazen Ghannam
Link: http://lkml.kernel.org/r/20181026003729.8420-2-vishal.l.verma@intel.com
---
 arch/x86/include/asm/mce.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index dbd9fe2f6163..c1a812bd5a27 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -222,6 +222,7 @@ static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_am
 int mce_available(struct cpuinfo_x86 *c);
 bool mce_is_memory_error(struct mce *m);
 bool mce_is_correctable(struct mce *m);
+int mce_usable_address(struct mce *m);
 
 DECLARE_PER_CPU(unsigned, mce_exception_count);
 DECLARE_PER_CPU(unsigned, mce_poll_count);
--
cgit v1.2.3
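Together, these two exports let the nfit MCE handler skip events that are either corrected or carry no usable address. A minimal userspace sketch of that filtering follows, with struct mce reduced to the relevant fields and simplified stand-ins for the kernel helpers (the real mce_is_correctable() and mce_usable_address() perform additional vendor-specific checks):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Architectural MCi_STATUS bits, as defined in asm/mce.h */
	#define MCI_STATUS_UC    (1ULL << 61)	/* error was uncorrected */
	#define MCI_STATUS_ADDRV (1ULL << 58)	/* mce->addr is valid */

	struct mce {
		uint64_t status;
		uint64_t addr;
	};

	static bool mce_is_correctable(const struct mce *m)
	{
		return !(m->status & MCI_STATUS_UC);
	}

	/* Simplified: the kernel helper also validates MISCV and
	 * vendor quirks before trusting mce->addr. */
	static bool mce_usable_address(const struct mce *m)
	{
		return m->status & MCI_STATUS_ADDRV;
	}

	/* The filtering order the two patches establish: record a badblock
	 * only for uncorrectable errors that carry a valid address. */
	static bool nfit_should_record(const struct mce *m)
	{
		return !mce_is_correctable(m) && mce_usable_address(m);
	}

	int main(void)
	{
		struct mce corrected = { .status = MCI_STATUS_ADDRV, .addr = 0x1000 };
		struct mce fatal     = { .status = MCI_STATUS_UC | MCI_STATUS_ADDRV,
					 .addr = 0x2000 };

		printf("corrected: %d, uncorrectable: %d\n",
		       nfit_should_record(&corrected), nfit_should_record(&fatal));
		return 0;
	}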
Shutemov" Date: Fri, 26 Oct 2018 15:28:54 +0300 Subject: x86/mm: Move LDT remap out of KASLR region on 5-level paging On 5-level paging the LDT remap area is placed in the middle of the KASLR randomization region and it can overlap with the direct mapping, the vmalloc or the vmap area. The LDT mapping is per mm, so it cannot be moved into the P4D page table next to the CPU_ENTRY_AREA without complicating PGD table allocation for 5-level paging. The 4 PGD slot gap just before the direct mapping is reserved for hypervisors, so it cannot be used. Move the direct mapping one slot deeper and use the resulting gap for the LDT remap area. The resulting layout is the same for 4 and 5 level paging. [ tglx: Massaged changelog ] Fixes: f55f0501cbf6 ("x86/pti: Put the LDT in its own PGD if PTI is on") Signed-off-by: Kirill A. Shutemov Signed-off-by: Thomas Gleixner Reviewed-by: Andy Lutomirski Cc: bp@alien8.de Cc: hpa@zytor.com Cc: dave.hansen@linux.intel.com Cc: peterz@infradead.org Cc: boris.ostrovsky@oracle.com Cc: jgross@suse.com Cc: bhe@redhat.com Cc: willy@infradead.org Cc: linux-mm@kvack.org Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20181026122856.66224-2-kirill.shutemov@linux.intel.com --- arch/x86/include/asm/page_64_types.h | 12 +++++++----- arch/x86/include/asm/pgtable_64_types.h | 4 +--- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index cd0cf1c568b4..8f657286d599 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -33,12 +33,14 @@ /* * Set __PAGE_OFFSET to the most negative possible address + - * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a - * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's - * what Xen requires. + * PGDIR_SIZE*17 (pgd slot 273). + * + * The gap is to allow a space for LDT remap for PTI (1 pgd slot) and space for + * a hypervisor (16 slots). Choosing 16 slots for a hypervisor is arbitrary, + * but it's what Xen requires. */ -#define __PAGE_OFFSET_BASE_L5 _AC(0xff10000000000000, UL) -#define __PAGE_OFFSET_BASE_L4 _AC(0xffff880000000000, UL) +#define __PAGE_OFFSET_BASE_L5 _AC(0xff11000000000000, UL) +#define __PAGE_OFFSET_BASE_L4 _AC(0xffff888000000000, UL) #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT #define __PAGE_OFFSET page_offset_base diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 04edd2d58211..84bd9bdc1987 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -111,9 +111,7 @@ extern unsigned int ptrs_per_p4d; */ #define MAXMEM (1UL << MAX_PHYSMEM_BITS) -#define LDT_PGD_ENTRY_L4 -3UL -#define LDT_PGD_ENTRY_L5 -112UL -#define LDT_PGD_ENTRY (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4) +#define LDT_PGD_ENTRY -240UL #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE) -- cgit v1.2.3 From 1457d8cf7664f34c4ba534c1073821a559a2f6f9 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Wed, 7 Nov 2018 18:01:00 +0100 Subject: x86/xen: fix pv boot Commit 9da3f2b7405440 ("x86/fault: BUG() when uaccess helpers fault on kernel addresses") introduced a regression for booting Xen PV guests. Xen PV guests are using __put_user() and __get_user() for accessing the p2m map (physical to machine frame number map) as accesses might fail in case of not populated areas of the map. 
From 1457d8cf7664f34c4ba534c1073821a559a2f6f9 Mon Sep 17 00:00:00 2001
From: Juergen Gross
Date: Wed, 7 Nov 2018 18:01:00 +0100
Subject: x86/xen: fix pv boot

Commit 9da3f2b7405440 ("x86/fault: BUG() when uaccess helpers fault on
kernel addresses") introduced a regression for booting Xen PV guests.

Xen PV guests are using __put_user() and __get_user() for accessing the
p2m map (physical to machine frame number map) as accesses might fail
in case of not populated areas of the map.

With the above commit, using __put_user() and __get_user() for accessing
kernel pages is no longer valid. So replace the Xen hack by adding
appropriate p2m access functions using the default fixup handler.

Fixes: 9da3f2b7405440 ("x86/fault: BUG() when uaccess helpers fault on kernel addresses")
Signed-off-by: Juergen Gross
Reviewed-by: Andrew Cooper
Signed-off-by: Juergen Gross
---
 arch/x86/include/asm/xen/page.h | 35 +++++++++++++++++++++++++++++++----
 1 file changed, 31 insertions(+), 4 deletions(-)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 123e669bf363..790ce08e41f2 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -9,7 +9,7 @@
 #include <linux/mm.h>
 #include <linux/device.h>
 
-#include <linux/uaccess.h>
+#include <asm/extable.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -93,12 +93,39 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
  */
 static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
 {
-	return __put_user(val, (unsigned long __user *)addr);
+	int ret = 0;
+
+	asm volatile("1: mov %[val], %[ptr]\n"
+		     "2:\n"
+		     ".section .fixup, \"ax\"\n"
+		     "3: sub $1, %[ret]\n"
+		     "   jmp 2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [ret] "+r" (ret), [ptr] "=m" (*addr)
+		     : [val] "r" (val));
+
+	return ret;
 }
 
-static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
+static inline int xen_safe_read_ulong(const unsigned long *addr,
+				      unsigned long *val)
 {
-	return __get_user(*val, (unsigned long __user *)addr);
+	int ret = 0;
+	unsigned long rval = ~0ul;
+
+	asm volatile("1: mov %[ptr], %[rval]\n"
+		     "2:\n"
+		     ".section .fixup, \"ax\"\n"
+		     "3: sub $1, %[ret]\n"
+		     "   jmp 2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [ret] "+r" (ret), [rval] "+r" (rval)
+		     : [ptr] "m" (*addr));
+	*val = rval;
+
+	return ret;
 }
 
 #ifdef CONFIG_XEN_PV
--
cgit v1.2.3
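For context, this is the caller pattern the new accessors are designed for. The helper and variable names below are illustrative, not taken from the Xen p2m code; the one assumption from the patch is that both accessors return 0 on success and -1 if the access faulted:

	/*
	 * Hypothetical caller sketch. A fault on a not-yet-populated part
	 * of the p2m map now comes back as -1 through the fixup handler,
	 * instead of tripping the BUG() in the uaccess fault path.
	 */
	static bool p2m_try_set_entry(unsigned long *p2m_entry, unsigned long mfn)
	{
		if (xen_safe_write_ulong(p2m_entry, mfn))
			return false;	/* not mapped: populate first, then retry */
		return true;
	}

	static unsigned long p2m_get_entry_or_invalid(const unsigned long *p2m_entry)
	{
		unsigned long mfn;

		if (xen_safe_read_ulong(p2m_entry, &mfn))
			return ~0ul;	/* INVALID_P2M_ENTRY-style sentinel */
		return mfn;
	}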