path: root/arch/arm/include
author	Vincent Whitchurch <vincent.whitchurch@axis.com>	2018-11-09 10:12:30 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-12-21 10:40:52 +0100
commit	fd9beff0ccd0ef44078be23d6ce5ba431e23f292 (patch)
tree	cef92260b7a6303196aa88e3e9ce08625c769884 /arch/arm/include
parent	f443ac6116d640e4f9f39692e667c64bad3ee549 (diff)
ARM: 8813/1: Make aligned 2-byte getuser()/putuser() atomic on ARMv6+
[ Upstream commit 344eb5539abf3e0b6ce22568c03e86450073e097 ]

getuser() and putuser() (and their underscored variants) use two strb[t]/ldrb[t] instructions when they are asked to get/put 16 bits. This means that the read/write is not atomic even when performed to a 16-bit-aligned address.

This leads to problems with vhost: vhost uses __getuser() to read the vring's 16-bit avail.index field, and if it happens to observe a partial update of the index, wrong descriptors will be used, which leads to a breakdown of the virtio communication. A similar problem exists for __putuser(), which is used to write to the vring's used.index field.

The reason these functions use strb[t]/ldrb[t] is that the strht/ldrht instructions did not exist until ARMv6T2/ARMv7, so the fix is straightforward on ARMv7. Also, since ARMv6 processors no longer actually use the unprivileged instructions for uaccess (because CONFIG_CPU_USE_DOMAINS is not used), they can easily be fixed as well.

Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
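To make the tearing concrete, here is a minimal illustrative sketch in plain C (the helper names are invented for this example; these are not the kernel macros). It contrasts composing a 16-bit value from two separate byte loads, as the pre-ARMv6 fallback does, with a single aligned halfword load, which is what the patch generates on ARMv6+. If the producer updates the index between the two byte loads, the reader composes a value that mixes the old and new halves.

#include <stdint.h>

/* Illustrative only: read_u16_bytewise() mirrors the two-ldrb fallback,
 * read_u16_halfword() mirrors the single-ldrh path added for ARMv6+. */
static inline uint16_t read_u16_bytewise(const volatile uint16_t *p)
{
	const volatile uint8_t *b = (const volatile uint8_t *)p;
	uint8_t b0 = b[0];               /* first byte load (ldrb)            */
	uint8_t b1 = b[1];               /* second byte load (ldrb); a writer
	                                  * may update *p between the two     */
#ifndef __ARMEB__
	return (uint16_t)(b0 | (b1 << 8));   /* little-endian: b0 is low byte */
#else
	return (uint16_t)((b0 << 8) | b1);   /* big-endian: b0 is high byte   */
#endif
}

static inline uint16_t read_u16_halfword(const volatile uint16_t *p)
{
	return *p;                       /* one aligned 16-bit load (ldrh):
	                                  * cannot observe a torn value       */
}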
Diffstat (limited to 'arch/arm/include')
-rw-r--r--	arch/arm/include/asm/uaccess.h	18
1 file changed, 18 insertions, 0 deletions
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 0f6c6b873bc5..e05c31af48d1 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -379,6 +379,13 @@ do { \
#define __get_user_asm_byte(x, addr, err) \
__get_user_asm(x, addr, err, ldrb)
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __get_user_asm_half(x, addr, err) \
+ __get_user_asm(x, addr, err, ldrh)
+
+#else
+
#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err) \
({ \
@@ -397,6 +404,8 @@ do { \
})
#endif
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
#define __get_user_asm_word(x, addr, err) \
__get_user_asm(x, addr, err, ldr)
#endif
@@ -472,6 +481,13 @@ do { \
#define __put_user_asm_byte(x, __pu_addr, err) \
__put_user_asm(x, __pu_addr, err, strb)
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __put_user_asm_half(x, __pu_addr, err) \
+ __put_user_asm(x, __pu_addr, err, strh)
+
+#else
+
#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err) \
({ \
@@ -488,6 +504,8 @@ do { \
})
#endif
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
#define __put_user_asm_word(x, __pu_addr, err) \
__put_user_asm(x, __pu_addr, err, str)
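The write side mirrors this. A similar illustrative sketch (again with invented helper names, not the kernel macros): storing a 16-bit field as two byte stores can expose a half-written value to a concurrent reader, which is the used.index hazard described in the commit message, whereas the single aligned halfword store that __put_user_asm_half() now generates on ARMv6+ (strh) cannot be observed half-done.

#include <stdint.h>

/* Illustrative only: write_u16_bytewise() mirrors the two-strb fallback,
 * write_u16_halfword() mirrors the single-strh path used on ARMv6+. */
static inline void write_u16_bytewise(volatile uint16_t *p, uint16_t v)
{
	volatile uint8_t *b = (volatile uint8_t *)p;
#ifndef __ARMEB__
	b[0] = (uint8_t)v;            /* low byte first on little-endian     */
	b[1] = (uint8_t)(v >> 8);     /* a reader may run between the stores */
#else
	b[0] = (uint8_t)(v >> 8);     /* high byte first on big-endian       */
	b[1] = (uint8_t)v;
#endif
}

static inline void write_u16_halfword(volatile uint16_t *p, uint16_t v)
{
	*p = v;                       /* one aligned 16-bit store (strh)     */
}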