Diffstat (limited to 'lib/raid6/rvv.c')
-rw-r--r--	lib/raid6/rvv.c	111
1 file changed, 58 insertions(+), 53 deletions(-)
diff --git a/lib/raid6/rvv.c b/lib/raid6/rvv.c
index f0887344b274..89da5fc247aa 100644
--- a/lib/raid6/rvv.c
+++ b/lib/raid6/rvv.c
@@ -9,11 +9,8 @@
* Copyright 2002-2004 H. Peter Anvin
*/
-#include <asm/simd.h>
#include <asm/vector.h>
-#include <crypto/internal/simd.h>
#include <linux/raid/pq.h>
-#include <linux/types.h>
#include "rvv.h"
#define NSIZE (riscv_v_vsize / 32) /* NSIZE = vlenb */
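(Note: in the kernel's RISC-V vector support, riscv_v_vsize is the size of the whole vector register file, i.e. 32 registers of vlenb bytes each, so riscv_v_vsize / 32 recovers vlenb: the byte width of a single vector register, and hence the stride each unrolled step advances at LMUL=1.)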
@@ -26,9 +23,9 @@ static int rvv_has_vector(void)
static void raid6_rvv1_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
- unsigned long d;
- int z, z0;
u8 *p, *q;
+ unsigned long vl, d;
+ int z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0 + 1]; /* XOR parity */
@@ -36,8 +33,9 @@ static void raid6_rvv1_gen_syndrome_real(int disks, unsigned long bytes, void **
asm volatile (".option push\n"
".option arch,+v\n"
- "vsetvli t0, x0, e8, m1, ta, ma\n"
+ "vsetvli %0, x0, e8, m1, ta, ma\n"
".option pop\n"
+ : "=&r" (vl)
);
/* v0:wp0, v1:wq0, v2:wd0/w20, v3:w10 */
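Every vsetvli hunk in this patch makes the same correctness fix: the old template wrote the new vector length into the hard-coded scratch register t0, which was neither an operand nor in the clobber list, so the compiler could still be keeping a live value there. The new template routes the result through an early-clobber output operand (the new local vl), letting the compiler allocate and track the destination register. A minimal standalone sketch, assuming a RISC-V toolchain with the V extension; the function name is illustrative, not from the patch:

static unsigned long vsetvli_max_e8m1(void)
{
	unsigned long vl;

	/*
	 * Select SEW=8, LMUL=1 and request VLMAX (rs1 = x0).  Writing
	 * the result through an output operand, rather than into a
	 * hard-coded t0, keeps the register write visible to the
	 * compiler's allocator.
	 */
	asm volatile (".option push\n"
		      ".option arch,+v\n"
		      "vsetvli %0, x0, e8, m1, ta, ma\n"
		      ".option pop\n"
		      : "=&r" (vl));

	return vl;	/* VLMAX for e8/m1 equals vlenb */
}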
@@ -46,7 +44,7 @@ static void raid6_rvv1_gen_syndrome_real(int disks, unsigned long bytes, void **
asm volatile (".option push\n"
".option arch,+v\n"
"vle8.v v0, (%[wp0])\n"
- "vle8.v v1, (%[wp0])\n"
+ "vmv.v.v v1, v0\n"
".option pop\n"
: :
[wp0]"r"(&dptr[z0][d + 0 * NSIZE])
@@ -99,7 +97,7 @@ static void raid6_rvv1_xor_syndrome_real(int disks, int start, int stop,
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
- unsigned long d;
+ unsigned long vl, d;
int z, z0;
z0 = stop; /* P/Q right side optimization */
@@ -108,8 +106,9 @@ static void raid6_rvv1_xor_syndrome_real(int disks, int start, int stop,
asm volatile (".option push\n"
".option arch,+v\n"
- "vsetvli t0, x0, e8, m1, ta, ma\n"
+ "vsetvli %0, x0, e8, m1, ta, ma\n"
".option pop\n"
+ : "=&r" (vl)
);
/* v0:wp0, v1:wq0, v2:wd0/w20, v3:w10 */
@@ -118,7 +117,7 @@ static void raid6_rvv1_xor_syndrome_real(int disks, int start, int stop,
asm volatile (".option push\n"
".option arch,+v\n"
"vle8.v v0, (%[wp0])\n"
- "vle8.v v1, (%[wp0])\n"
+ "vmv.v.v v1, v0\n"
".option pop\n"
: :
[wp0]"r"(&dptr[z0][d + 0 * NSIZE])
@@ -195,9 +194,9 @@ static void raid6_rvv1_xor_syndrome_real(int disks, int start, int stop,
static void raid6_rvv2_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
- unsigned long d;
- int z, z0;
u8 *p, *q;
+ unsigned long vl, d;
+ int z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0 + 1]; /* XOR parity */
@@ -205,8 +204,9 @@ static void raid6_rvv2_gen_syndrome_real(int disks, unsigned long bytes, void **
asm volatile (".option push\n"
".option arch,+v\n"
- "vsetvli t0, x0, e8, m1, ta, ma\n"
+ "vsetvli %0, x0, e8, m1, ta, ma\n"
".option pop\n"
+ : "=&r" (vl)
);
/*
@@ -218,9 +218,9 @@ static void raid6_rvv2_gen_syndrome_real(int disks, unsigned long bytes, void **
asm volatile (".option push\n"
".option arch,+v\n"
"vle8.v v0, (%[wp0])\n"
- "vle8.v v1, (%[wp0])\n"
+ "vmv.v.v v1, v0\n"
"vle8.v v4, (%[wp1])\n"
- "vle8.v v5, (%[wp1])\n"
+ "vmv.v.v v5, v4\n"
".option pop\n"
: :
[wp0]"r"(&dptr[z0][d + 0 * NSIZE]),
@@ -287,7 +287,7 @@ static void raid6_rvv2_xor_syndrome_real(int disks, int start, int stop,
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
- unsigned long d;
+ unsigned long vl, d;
int z, z0;
z0 = stop; /* P/Q right side optimization */
@@ -296,8 +296,9 @@ static void raid6_rvv2_xor_syndrome_real(int disks, int start, int stop,
asm volatile (".option push\n"
".option arch,+v\n"
- "vsetvli t0, x0, e8, m1, ta, ma\n"
+ "vsetvli %0, x0, e8, m1, ta, ma\n"
".option pop\n"
+ : "=&r" (vl)
);
/*
@@ -309,9 +310,9 @@ static void raid6_rvv2_xor_syndrome_real(int disks, int start, int stop,
asm volatile (".option push\n"
".option arch,+v\n"
"vle8.v v0, (%[wp0])\n"
- "vle8.v v1, (%[wp0])\n"
+ "vmv.v.v v1, v0\n"
"vle8.v v4, (%[wp1])\n"
- "vle8.v v5, (%[wp1])\n"
+ "vmv.v.v v5, v4\n"
".option pop\n"
: :
[wp0]"r"(&dptr[z0][d + 0 * NSIZE]),
@@ -413,9 +414,9 @@ static void raid6_rvv2_xor_syndrome_real(int disks, int start, int stop,
static void raid6_rvv4_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
- unsigned long d;
- int z, z0;
u8 *p, *q;
+ unsigned long vl, d;
+ int z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0 + 1]; /* XOR parity */
@@ -423,8 +424,9 @@ static void raid6_rvv4_gen_syndrome_real(int disks, unsigned long bytes, void **
asm volatile (".option push\n"
".option arch,+v\n"
- "vsetvli t0, x0, e8, m1, ta, ma\n"
+ "vsetvli %0, x0, e8, m1, ta, ma\n"
".option pop\n"
+ : "=&r" (vl)
);
/*
@@ -438,13 +440,13 @@ static void raid6_rvv4_gen_syndrome_real(int disks, unsigned long bytes, void **
asm volatile (".option push\n"
".option arch,+v\n"
"vle8.v v0, (%[wp0])\n"
- "vle8.v v1, (%[wp0])\n"
+ "vmv.v.v v1, v0\n"
"vle8.v v4, (%[wp1])\n"
- "vle8.v v5, (%[wp1])\n"
+ "vmv.v.v v5, v4\n"
"vle8.v v8, (%[wp2])\n"
- "vle8.v v9, (%[wp2])\n"
+ "vmv.v.v v9, v8\n"
"vle8.v v12, (%[wp3])\n"
- "vle8.v v13, (%[wp3])\n"
+ "vmv.v.v v13, v12\n"
".option pop\n"
: :
[wp0]"r"(&dptr[z0][d + 0 * NSIZE]),
@@ -539,7 +541,7 @@ static void raid6_rvv4_xor_syndrome_real(int disks, int start, int stop,
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
- unsigned long d;
+ unsigned long vl, d;
int z, z0;
z0 = stop; /* P/Q right side optimization */
@@ -548,8 +550,9 @@ static void raid6_rvv4_xor_syndrome_real(int disks, int start, int stop,
asm volatile (".option push\n"
".option arch,+v\n"
- "vsetvli t0, x0, e8, m1, ta, ma\n"
+ "vsetvli %0, x0, e8, m1, ta, ma\n"
".option pop\n"
+ : "=&r" (vl)
);
/*
@@ -563,13 +566,13 @@ static void raid6_rvv4_xor_syndrome_real(int disks, int start, int stop,
asm volatile (".option push\n"
".option arch,+v\n"
"vle8.v v0, (%[wp0])\n"
- "vle8.v v1, (%[wp0])\n"
+ "vmv.v.v v1, v0\n"
"vle8.v v4, (%[wp1])\n"
- "vle8.v v5, (%[wp1])\n"
+ "vmv.v.v v5, v4\n"
"vle8.v v8, (%[wp2])\n"
- "vle8.v v9, (%[wp2])\n"
+ "vmv.v.v v9, v8\n"
"vle8.v v12, (%[wp3])\n"
- "vle8.v v13, (%[wp3])\n"
+ "vmv.v.v v13, v12\n"
".option pop\n"
: :
[wp0]"r"(&dptr[z0][d + 0 * NSIZE]),
@@ -721,9 +724,9 @@ static void raid6_rvv4_xor_syndrome_real(int disks, int start, int stop,
static void raid6_rvv8_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
- unsigned long d;
- int z, z0;
u8 *p, *q;
+ unsigned long vl, d;
+ int z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0 + 1]; /* XOR parity */
@@ -731,8 +734,9 @@ static void raid6_rvv8_gen_syndrome_real(int disks, unsigned long bytes, void **
asm volatile (".option push\n"
".option arch,+v\n"
- "vsetvli t0, x0, e8, m1, ta, ma\n"
+ "vsetvli %0, x0, e8, m1, ta, ma\n"
".option pop\n"
+ : "=&r" (vl)
);
/*
@@ -750,21 +754,21 @@ static void raid6_rvv8_gen_syndrome_real(int disks, unsigned long bytes, void **
asm volatile (".option push\n"
".option arch,+v\n"
"vle8.v v0, (%[wp0])\n"
- "vle8.v v1, (%[wp0])\n"
+ "vmv.v.v v1, v0\n"
"vle8.v v4, (%[wp1])\n"
- "vle8.v v5, (%[wp1])\n"
+ "vmv.v.v v5, v4\n"
"vle8.v v8, (%[wp2])\n"
- "vle8.v v9, (%[wp2])\n"
+ "vmv.v.v v9, v8\n"
"vle8.v v12, (%[wp3])\n"
- "vle8.v v13, (%[wp3])\n"
+ "vmv.v.v v13, v12\n"
"vle8.v v16, (%[wp4])\n"
- "vle8.v v17, (%[wp4])\n"
+ "vmv.v.v v17, v16\n"
"vle8.v v20, (%[wp5])\n"
- "vle8.v v21, (%[wp5])\n"
+ "vmv.v.v v21, v20\n"
"vle8.v v24, (%[wp6])\n"
- "vle8.v v25, (%[wp6])\n"
+ "vmv.v.v v25, v24\n"
"vle8.v v28, (%[wp7])\n"
- "vle8.v v29, (%[wp7])\n"
+ "vmv.v.v v29, v28\n"
".option pop\n"
: :
[wp0]"r"(&dptr[z0][d + 0 * NSIZE]),
@@ -915,7 +919,7 @@ static void raid6_rvv8_xor_syndrome_real(int disks, int start, int stop,
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
- unsigned long d;
+ unsigned long vl, d;
int z, z0;
z0 = stop; /* P/Q right side optimization */
@@ -924,8 +928,9 @@ static void raid6_rvv8_xor_syndrome_real(int disks, int start, int stop,
asm volatile (".option push\n"
".option arch,+v\n"
- "vsetvli t0, x0, e8, m1, ta, ma\n"
+ "vsetvli %0, x0, e8, m1, ta, ma\n"
".option pop\n"
+ : "=&r" (vl)
);
/*
@@ -943,21 +948,21 @@ static void raid6_rvv8_xor_syndrome_real(int disks, int start, int stop,
asm volatile (".option push\n"
".option arch,+v\n"
"vle8.v v0, (%[wp0])\n"
- "vle8.v v1, (%[wp0])\n"
+ "vmv.v.v v1, v0\n"
"vle8.v v4, (%[wp1])\n"
- "vle8.v v5, (%[wp1])\n"
+ "vmv.v.v v5, v4\n"
"vle8.v v8, (%[wp2])\n"
- "vle8.v v9, (%[wp2])\n"
+ "vmv.v.v v9, v8\n"
"vle8.v v12, (%[wp3])\n"
- "vle8.v v13, (%[wp3])\n"
+ "vmv.v.v v13, v12\n"
"vle8.v v16, (%[wp4])\n"
- "vle8.v v17, (%[wp4])\n"
+ "vmv.v.v v17, v16\n"
"vle8.v v20, (%[wp5])\n"
- "vle8.v v21, (%[wp5])\n"
+ "vmv.v.v v21, v20\n"
"vle8.v v24, (%[wp6])\n"
- "vle8.v v25, (%[wp6])\n"
+ "vmv.v.v v25, v24\n"
"vle8.v v28, (%[wp7])\n"
- "vle8.v v29, (%[wp7])\n"
+ "vmv.v.v v29, v28\n"
".option pop\n"
: :
[wp0]"r"(&dptr[z0][d + 0 * NSIZE]),