author		Ben Dooks <ben.dooks@codethink.co.uk>	2025-08-07 17:49:33 +0100
committer	Leo Yu-Chi Liang <ycliang@andestech.com>	2025-08-14 15:33:00 +0800
commit		35d6caad6de93b3a5545df7bcdc6d322a4edb93c (patch)
tree		be2fc713df109399cc086870737f8c6b5260bb44
parent		909ceacf4a8ac5360c25588f9db7b6dbd19c5c06 (diff)
arch/riscv/lib: update memmove and memcpy for big-endian
Change the shift patterns for the unaligned memory move and copy code
to deal with big-endian by defining macros that swap the shift-left and
shift-right directions.
Signed-off-by: Ben Dooks <ben.dooks@codethink.co.uk>
Reviewed-by: Leo Yu-Chi Liang <ycliang@andestech.com>
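For reference, a minimal C sketch of the shift trick the new M_SLL/M_SRL macros express: building one destination-aligned word from two misaligned source words. The helper name combine() and the fixed 8-byte word are illustrative assumptions, not part of the patch (the assembly parameterises the word size via SZREG).

#include <stdint.h>

/* Splice a destination word out of two consecutive aligned source words
 * when the source pointer sits `off` bytes into a word (0 < off < 8).
 * Hypothetical helper for illustration only. */
uint64_t combine(uint64_t w0, uint64_t w1, unsigned off)
{
	unsigned t3 = off * 8;	/* bits already consumed from w0 */
	unsigned t4 = 64 - t3;	/* bits still needed from w1 */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	/* Little-endian: lower memory offsets live in lower-order bits,
	 * so drop the consumed bytes of w0 with a right shift and splice
	 * the leading bytes of w1 in with a left shift. */
	return (w0 >> t3) | (w1 << t4);
#else
	/* Big-endian: lower memory offsets live in higher-order bits,
	 * so the two shift directions swap - exactly what redefining
	 * M_SLL/M_SRL as srl/sll achieves in the assembly. */
	return (w0 << t3) | (w1 >> t4);
#endif
}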
-rw-r--r--	arch/riscv/lib/memcpy.S		12
-rw-r--r--	arch/riscv/lib/memmove.S	12
2 files changed, 20 insertions(+), 4 deletions(-)
diff --git a/arch/riscv/lib/memcpy.S b/arch/riscv/lib/memcpy.S
index 9884077c933..e5479bbe84e 100644
--- a/arch/riscv/lib/memcpy.S
+++ b/arch/riscv/lib/memcpy.S
@@ -125,6 +125,14 @@ WEAK(memcpy)
 .copy_end:
 	ret
 
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define M_SLL sll
+#define M_SRL srl
+#else
+#define M_SLL srl
+#define M_SRL sll
+#endif
+
 .Lmisaligned_word_copy:
 	/*
 	 * Misaligned word-wise copy.
@@ -144,10 +152,10 @@ WEAK(memcpy)
 	addi	t0, t0, -(SZREG-1)
 	/* At least one iteration will be executed here, no check */
 1:
-	srl	a4, a5, t3
+	M_SRL	a4, a5, t3
 	REG_L	a5, SZREG(a1)
 	addi	a1, a1, SZREG
-	sll	a2, a5, t4
+	M_SLL	a2, a5, t4
 	or	a2, a2, a4
 	REG_S	a2, 0(a0)
 	addi	a0, a0, SZREG
diff --git a/arch/riscv/lib/memmove.S b/arch/riscv/lib/memmove.S
index fbe6701dbe4..b2c1c736713 100644
--- a/arch/riscv/lib/memmove.S
+++ b/arch/riscv/lib/memmove.S
@@ -91,6 +91,14 @@ WEAK(memmove)
 	mv	a0, t0
 	ret
 
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define M_SLL sll
+#define M_SRL srl
+#else
+#define M_SLL srl
+#define M_SRL sll
+#endif
+
 .Lmisaligned_word_copy:
 	/*
 	 * Misaligned word-wise copy.
@@ -110,10 +118,10 @@ WEAK(memmove)
 	addi	t0, t0, SZREG-1
 	/* At least one iteration will be executed here, no check */
 1:
-	sll	a4, a5, t4
+	M_SLL	a4, a5, t4
 	addi	a1, a1, -SZREG
 	REG_L	a5, 0(a1)
-	srl	a2, a5, t3
+	M_SRL	a2, a5, t3
 	or	a2, a2, a4
 	addi	a0, a0, -SZREG
 	REG_S	a2, 0(a0)
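To show how the memcpy loop in the diff uses those shifts, here is a hedged C sketch of the forward misaligned word copy. Only the little-endian form is written out; as above, a big-endian build would swap the two shifts. The function name misaligned_word_copy(), the fixed 64-bit word, and the argument layout are assumptions for illustration and do not mirror the U-Boot code exactly.

#include <stddef.h>
#include <stdint.h>

/* Forward misaligned copy of `words` register-sized words: dst is aligned,
 * src is not ((uintptr_t)src % 8 != 0). Follows the srl/REG_L/sll/or/REG_S
 * pattern of the memcpy loop; illustrative sketch only. */
void misaligned_word_copy(uint64_t *dst, const uint8_t *src, size_t words)
{
	unsigned off = (uintptr_t)src & 7;
	unsigned t3 = off * 8;			/* like t3 in the assembly */
	unsigned t4 = 64 - t3;			/* like t4 in the assembly */
	const uint64_t *s = (const uint64_t *)(src - off);	/* align down */
	uint64_t cur = *s++;			/* first, partially used word */

	while (words--) {
		uint64_t next = *s++;		/* REG_L a5, SZREG(a1) */
		*dst++ = (cur >> t3) | (next << t4);	/* srl / sll / or */
		cur = next;
	}
}

The memmove hunk applies the same idea to the backward loop, which walks down through memory and therefore uses the shift amounts in the opposite roles; the M_SLL/M_SRL swap covers both cases.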