From 0e9867b7113c56b367f2e753cd411cf7cef0d2ec Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Wed, 28 Dec 2016 11:33:48 +0100
Subject: s390/ctl_reg: make __ctl_load a full memory barrier

[ Upstream commit e991c24d68b8c0ba297eeb7af80b1e398e98c33f ]

We have quite a lot of code that depends on the order of the
__ctl_load inline assembly and subsequent memory accesses, like
e.g. disabling lowcore protection and the writing to lowcore.

Since the __ctl_load macro does not have memory barrier semantics, nor
any other dependencies, the compiler is, theoretically, free to shuffle
code around. Or in other words: storing to lowcore could happen before
lowcore protection is disabled.

In order to avoid this class of potential bugs simply add a full
memory barrier to the __ctl_load macro.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
Signed-off-by: Sasha Levin
Signed-off-by: Greg Kroah-Hartman
---
 arch/s390/include/asm/ctl_reg.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index d7697ab802f6..8e136b88cdf4 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -15,7 +15,9 @@
 	BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
 	asm volatile(							\
 		"	lctlg	%1,%2,%0\n"				\
-		: : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+		:							\
+		: "Q" (*(addrtype *)(&array)), "i" (low), "i" (high)	\
+		: "memory");						\
 }
 
 #define __ctl_store(array, low, high) {					\
-- 
cgit v1.2.3
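
Below is a minimal, self-contained sketch (not the kernel macro itself, and not
s390-specific) of the compiler-reordering hazard the patch closes. The function
names and the lowcore stand-in array are hypothetical, chosen only to mirror the
"disable protection, then store" pattern described in the commit message; the
point is that an asm statement with a "memory" clobber acts as a full compiler
barrier, while one without it does not.

```c
#include <stdio.h>

static int lowcore[4];	/* stand-in for the lowcore region */

/*
 * No "memory" clobber: the asm has no visible dependency on
 * surrounding memory accesses, so the compiler may hoist or sink
 * stores across it -- the bug class the patch is about.
 */
static inline void ctl_load_no_barrier(unsigned long val)
{
	asm volatile("" : : "r" (val));
}

/*
 * With the "memory" clobber the statement is a full compiler
 * barrier: stores before it must be emitted before the asm, and
 * stores after it cannot be moved up across it.
 */
static inline void ctl_load_with_barrier(unsigned long val)
{
	asm volatile("" : : "r" (val) : "memory");
}

int main(void)
{
	ctl_load_with_barrier(0);	/* "disable lowcore protection" */
	lowcore[0] = 42;		/* guaranteed to stay after the asm */
	ctl_load_with_barrier(1);	/* "re-enable lowcore protection" */

	printf("%d\n", lowcore[0]);
	return 0;
}
```

With ctl_load_no_barrier() in place of the barrier variant, an optimizing
compiler would be entitled to emit the store to lowcore[0] before the first
asm statement, which is exactly the "storing to lowcore before protection is
disabled" scenario the commit message warns about.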