author     Linus Torvalds <torvalds@linux-foundation.org>   2014-06-14 14:46:29 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-06-14 14:46:29 -0700
commit     c728762e064ad37bfd6689f2ac1f54d2355ac3ae (patch)
tree       767b98c350a241b8d6fedb9f1f770516780f0918  /arch/x86/vdso/vdso2c.c
parent     503698e12d68f6144b408b11156ad315943ffb53 (diff)
parent     a934fb5bc9cd1260be89272cfb7a6c9dc71974d7 (diff)
Merge branch 'x86-vdso-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 vdso fixes from Peter Anvin:
 "Fixes for x86/vdso. One is a simple build fix for bigendian hosts, one
  is to make "make vdso_install" work again, and the rest is about
  working around a bug in Google's Go language -- two are documentation
  patches that improve the sample code that the Go coders took, modified,
  and broke; the other two implement a workaround that keeps existing Go
  binaries from segfaulting at least"

* 'x86-vdso-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/vdso: Fix vdso_install
  x86/vdso: Hack to keep 64-bit Go programs working
  x86/vdso: Add PUT_LE to store little-endian values
  x86/vdso/doc: Make vDSO examples more portable
  x86/vdso/doc: Rename vdso_test.c to vdso_standalone_test_x86.c
  x86, vdso: Remove one final use of htole16()
Diffstat (limited to 'arch/x86/vdso/vdso2c.c')
-rw-r--r--  arch/x86/vdso/vdso2c.c | 19 ++++++++++++++++---
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/arch/x86/vdso/vdso2c.c b/arch/x86/vdso/vdso2c.c
index 450ac6eaf613..7a6bf50f9165 100644
--- a/arch/x86/vdso/vdso2c.c
+++ b/arch/x86/vdso/vdso2c.c
@@ -54,7 +54,7 @@ static void fail(const char *format, ...)
}
/*
- * Evil macros to do a little-endian read.
+ * Evil macros for little-endian reads and writes
*/
#define GLE(x, bits, ifnot) \
__builtin_choose_expr( \
@@ -62,11 +62,24 @@ static void fail(const char *format, ...)
(__typeof__(*(x)))get_unaligned_le##bits(x), ifnot)
extern void bad_get_le(void);
-#define LAST_LE(x) \
+#define LAST_GLE(x) \
__builtin_choose_expr(sizeof(*(x)) == 1, *(x), bad_get_le())
#define GET_LE(x) \
- GLE(x, 64, GLE(x, 32, GLE(x, 16, LAST_LE(x))))
+ GLE(x, 64, GLE(x, 32, GLE(x, 16, LAST_GLE(x))))
+
+#define PLE(x, val, bits, ifnot) \
+ __builtin_choose_expr( \
+ (sizeof(*(x)) == bits/8), \
+ put_unaligned_le##bits((val), (x)), ifnot)
+
+extern void bad_put_le(void);
+#define LAST_PLE(x, val) \
+ __builtin_choose_expr(sizeof(*(x)) == 1, *(x) = (val), bad_put_le())
+
+#define PUT_LE(x, val) \
+ PLE(x, val, 64, PLE(x, val, 32, PLE(x, val, 16, LAST_PLE(x, val))))
+
#define NSYMS (sizeof(required_syms) / sizeof(required_syms[0]))
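
The new PUT_LE mirrors GET_LE: __builtin_choose_expr picks the fixed-width accessor whose size matches sizeof(*(x)) at compile time, and an unsupported size falls through to a call to the deliberately undefined bad_get_le()/bad_put_le(), so the mistake surfaces as a link error rather than a silent misread or miswrite. Below is a minimal standalone sketch of the same pattern; the byte-shifting helpers are simplified stand-ins for the get_unaligned_le*/put_unaligned_le* functions that vdso2c.c normally gets from tools/le_byteshift.h, and only the 16- and 32-bit cases are shown (the real macros also cover the 64-bit and single-byte cases).

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the tools/le_byteshift.h helpers that
 * vdso2c.c actually uses (local copies, for illustration only). */
static uint16_t get_unaligned_le16(const void *p)
{
        const uint8_t *b = p;
        return (uint16_t)b[0] | ((uint16_t)b[1] << 8);
}

static uint32_t get_unaligned_le32(const void *p)
{
        const uint8_t *b = p;
        return (uint32_t)get_unaligned_le16(b) |
               ((uint32_t)get_unaligned_le16(b + 2) << 16);
}

static void put_unaligned_le16(uint16_t val, void *p)
{
        uint8_t *b = p;
        b[0] = val & 0xff;
        b[1] = val >> 8;
}

static void put_unaligned_le32(uint32_t val, void *p)
{
        uint8_t *b = p;
        put_unaligned_le16(val & 0xffff, b);
        put_unaligned_le16(val >> 16, b + 2);
}

/* Deliberately left undefined: if the size dispatch ever selects them,
 * the build fails at link time, just like bad_get_le()/bad_put_le()
 * in the patch above. */
extern void bad_get_le(void);
extern void bad_put_le(void);

/* Same shape as the GET_LE/PUT_LE macros in vdso2c.c, trimmed to two widths. */
#define GLE(x, bits, ifnot)                                     \
        __builtin_choose_expr(                                  \
                (sizeof(*(x)) == bits/8),                       \
                (__typeof__(*(x)))get_unaligned_le##bits(x), ifnot)
#define GET_LE(x)       GLE(x, 32, GLE(x, 16, bad_get_le()))

#define PLE(x, val, bits, ifnot)                                \
        __builtin_choose_expr(                                  \
                (sizeof(*(x)) == bits/8),                       \
                put_unaligned_le##bits((val), (x)), ifnot)
#define PUT_LE(x, val)  PLE(x, val, 32, PLE(x, val, 16, bad_put_le()))

int main(void)
{
        uint32_t word;
        uint16_t half;

        PUT_LE(&word, 0x11223344);      /* resolves to put_unaligned_le32() */
        PUT_LE(&half, 0x5566);          /* resolves to put_unaligned_le16() */

        printf("%#x %#x\n", (unsigned)GET_LE(&word), (unsigned)GET_LE(&half));
        return 0;
}

Because the dispatch keys off sizeof(*(x)), the caller never spells out a width: the same GET_LE/PUT_LE invocation works on 16-, 32- and (in the real macros) 64-bit ELF image fields, which is what lets vdso2c process both vDSO flavors with one body of code.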