Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                        57
-rw-r--r--  lib/Kconfig.debug                   2
-rw-r--r--  lib/Kconfig.ubsan                   1
-rw-r--r--  lib/alloc_tag.c                    15
-rw-r--r--  lib/asn1_decoder.c                  1
-rw-r--r--  lib/iov_iter.c                      2
-rw-r--r--  lib/raid6/Makefile                  1
-rw-r--r--  lib/raid6/algos.c                   9
-rw-r--r--  lib/raid6/recov_rvv.c             229
-rw-r--r--  lib/raid6/rvv.c                  1212
-rw-r--r--  lib/raid6/rvv.h                    39
-rw-r--r--  lib/string.c                       13
-rw-r--r--  lib/test_ubsan.c                   18
-rw-r--r--  lib/tests/slub_kunit.c              1
-rw-r--r--  lib/ucs2_string.c                   1
-rw-r--r--  lib/zlib_inflate/inflate_syms.c     1
16 files changed, 1555 insertions(+), 47 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 61cce0686b53..6c1b8f184267 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -139,27 +139,22 @@ config TRACE_MMIO_ACCESS
source "lib/crypto/Kconfig"
config CRC_CCITT
- tristate "CRC-CCITT functions"
+ tristate
help
- This option is provided for the case where no in-kernel-tree
- modules require CRC-CCITT functions, but a module built outside
- the kernel tree does. Such modules that use library CRC-CCITT
- functions require M here.
+ The CRC-CCITT library functions. Select this if your module uses any
+ of the functions from <linux/crc-ccitt.h>.
config CRC16
- tristate "CRC16 functions"
+ tristate
help
- This option is provided for the case where no in-kernel-tree
- modules require CRC16 functions, but a module built outside
- the kernel tree does. Such modules that use library CRC16
- functions require M here.
+ The CRC16 library functions. Select this if your module uses any of
+ the functions from <linux/crc16.h>.
config CRC_T10DIF
- tristate "CRC calculation for the T10 Data Integrity Field"
+ tristate
help
- This option is only needed if a module that's not in the
- kernel tree needs to calculate CRC checks for use with the
- SCSI data integrity subsystem.
+ The CRC-T10DIF library functions. Select this if your module uses
+ any of the functions from <linux/crc-t10dif.h>.
config ARCH_HAS_CRC_T10DIF
bool
@@ -169,22 +164,17 @@ config CRC_T10DIF_ARCH
default CRC_T10DIF if ARCH_HAS_CRC_T10DIF && CRC_OPTIMIZATIONS
config CRC_ITU_T
- tristate "CRC ITU-T V.41 functions"
+ tristate
help
- This option is provided for the case where no in-kernel-tree
- modules require CRC ITU-T V.41 functions, but a module built outside
- the kernel tree does. Such modules that use library CRC ITU-T V.41
- functions require M here.
+ The CRC-ITU-T library functions. Select this if your module uses
+ any of the functions from <linux/crc-itu-t.h>.
config CRC32
- tristate "CRC32/CRC32c functions"
- default y
+ tristate
select BITREVERSE
help
- This option is provided for the case where no in-kernel-tree
- modules require CRC32/CRC32c functions, but a module built outside
- the kernel tree does. Such modules that use library CRC32/CRC32c
- functions require M here.
+ The CRC32 library functions. Select this if your module uses any of
+ the functions from <linux/crc32.h> or <linux/crc32c.h>.
config ARCH_HAS_CRC32
bool
@@ -195,6 +185,9 @@ config CRC32_ARCH
config CRC64
tristate
+ help
+ The CRC64 library functions. Select this if your module uses any of
+ the functions from <linux/crc64.h>.
config ARCH_HAS_CRC64
bool
@@ -205,19 +198,21 @@ config CRC64_ARCH
config CRC4
tristate
+ help
+ The CRC4 library functions. Select this if your module uses any of
+ the functions from <linux/crc4.h>.
config CRC7
tristate
-
-config LIBCRC32C
- tristate
- select CRC32
help
- This option just selects CRC32 and is provided for compatibility
- purposes until the users are updated to select CRC32 directly.
+ The CRC7 library functions. Select this if your module uses any of
+ the functions from <linux/crc7.h>.
config CRC8
tristate
+ help
+ The CRC8 library functions. Select this if your module uses any of
+ the functions from <linux/crc8.h>.
config CRC_OPTIMIZATIONS
bool "Enable optimized CRC implementations" if EXPERT
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9fe4d8dfe578..f9051ab610d5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -3290,7 +3290,7 @@ config GCD_KUNIT_TEST
config PRIME_NUMBERS_KUNIT_TEST
tristate "Prime number generator test" if !KUNIT_ALL_TESTS
depends on KUNIT
- select PRIME_NUMBERS
+ depends on PRIME_NUMBERS
default KUNIT_ALL_TESTS
help
This option enables the KUnit test suite for the {is,next}_prime_number
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 4216b3a4ff21..f6ea0c5b5da3 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -118,7 +118,6 @@ config UBSAN_UNREACHABLE
config UBSAN_INTEGER_WRAP
bool "Perform checking for integer arithmetic wrap-around"
- default UBSAN
depends on !COMPILE_TEST
depends on $(cc-option,-fsanitize-undefined-ignore-overflow-pattern=all)
depends on $(cc-option,-fsanitize=signed-integer-overflow)
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 1d893e313614..25ecc1334b67 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -422,11 +422,20 @@ static int vm_module_tags_populate(void)
unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);
unsigned long more_pages;
- unsigned long nr;
+ unsigned long nr = 0;
more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
- nr = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
- NUMA_NO_NODE, more_pages, next_page);
+ while (nr < more_pages) {
+ unsigned long allocated;
+
+ allocated = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
+ NUMA_NO_NODE, more_pages - nr, next_page + nr);
+
+ if (!allocated)
+ break;
+ nr += allocated;
+ }
+
if (nr < more_pages ||
vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
next_page, PAGE_SHIFT) < 0) {
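
The alloc_tag.c hunk replaces a single alloc_pages_bulk_node() call with a loop, because the bulk allocator may legitimately return fewer pages than asked for. A minimal sketch of the same retry pattern in isolation (names are hypothetical, not part of the patch):

#include <linux/gfp.h>

/*
 * Sketch of the bulk-allocation retry pattern used above (illustration
 * only): keep asking for the remainder until the array is full or the
 * allocator makes no progress at all, and return the short count so the
 * caller can handle partial success.
 */
static unsigned long fill_page_array(struct page **pages, unsigned long want)
{
	unsigned long got = 0;

	while (got < want) {
		unsigned long n;

		n = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
					  NUMA_NO_NODE, want - got, pages + got);
		if (!n)
			break;
		got += n;
	}

	return got;
}
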
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 13da529e2e72..5738ae286b41 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -518,4 +518,5 @@ error:
}
EXPORT_SYMBOL_GPL(asn1_ber_decoder);
+MODULE_DESCRIPTION("Decoder for ASN.1 BER/DER/CER encoded bytestream");
MODULE_LICENSE("GPL");
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 8c7fdb7d8c8f..bc9391e55d57 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1191,7 +1191,7 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
return -ENOMEM;
p = *pages;
for (int k = 0; k < n; k++) {
- struct folio *folio = page_folio(page);
+ struct folio *folio = page_folio(page + k);
p[k] = page + k;
if (!folio_test_slab(folio))
folio_get(folio);
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 29127dd05d63..5be0a4e60ab1 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -10,6 +10,7 @@ raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o \
raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
raid6_pq-$(CONFIG_LOONGARCH) += loongarch_simd.o recov_loongarch_simd.o
+raid6_pq-$(CONFIG_RISCV_ISA_V) += rvv.o recov_rvv.o
hostprogs += mktables
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index cd2e88ee1f14..99980ff5b985 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -81,6 +81,12 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_lsx,
#endif
#endif
+#ifdef CONFIG_RISCV_ISA_V
+ &raid6_rvvx1,
+ &raid6_rvvx2,
+ &raid6_rvvx4,
+ &raid6_rvvx8,
+#endif
&raid6_intx8,
&raid6_intx4,
&raid6_intx2,
@@ -116,6 +122,9 @@ const struct raid6_recov_calls *const raid6_recov_algos[] = {
&raid6_recov_lsx,
#endif
#endif
+#ifdef CONFIG_RISCV_ISA_V
+ &raid6_recov_rvv,
+#endif
&raid6_recov_intx1,
NULL
};
diff --git a/lib/raid6/recov_rvv.c b/lib/raid6/recov_rvv.c
new file mode 100644
index 000000000000..f29303795ccf
--- /dev/null
+++ b/lib/raid6/recov_rvv.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2024 Institute of Software, CAS.
+ * Author: Chunyan Zhang <zhangchunyan@iscas.ac.cn>
+ */
+
+#include <asm/simd.h>
+#include <asm/vector.h>
+#include <crypto/internal/simd.h>
+#include <linux/raid/pq.h>
+
+static int rvv_has_vector(void)
+{
+ return has_vector();
+}
+
+static void __raid6_2data_recov_rvv(int bytes, u8 *p, u8 *q, u8 *dp,
+ u8 *dq, const u8 *pbmul,
+ const u8 *qmul)
+{
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsetvli x0, %[avl], e8, m1, ta, ma\n"
+ ".option pop\n"
+ : :
+ [avl]"r"(16)
+ );
+
+ /*
+ * while ( bytes-- ) {
+ * uint8_t px, qx, db;
+ *
+ * px = *p ^ *dp;
+ * qx = qmul[*q ^ *dq];
+ * *dq++ = db = pbmul[px] ^ qx;
+ * *dp++ = db ^ px;
+ * p++; q++;
+ * }
+ */
+ while (bytes) {
+ /*
+ * v0:px, v1:dp,
+ * v2:qx, v3:dq,
+ * v4:vx, v5:vy,
+ * v6:qm0, v7:qm1,
+ * v8:pm0, v9:pm1,
+ * v14:p/qm[vx], v15:p/qm[vy]
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vle8.v v0, (%[px])\n"
+ "vle8.v v1, (%[dp])\n"
+ "vxor.vv v0, v0, v1\n"
+ "vle8.v v2, (%[qx])\n"
+ "vle8.v v3, (%[dq])\n"
+ "vxor.vv v4, v2, v3\n"
+ "vsrl.vi v5, v4, 4\n"
+ "vand.vi v4, v4, 0xf\n"
+ "vle8.v v6, (%[qm0])\n"
+ "vle8.v v7, (%[qm1])\n"
+ "vrgather.vv v14, v6, v4\n" /* v14 = qm[vx] */
+ "vrgather.vv v15, v7, v5\n" /* v15 = qm[vy] */
+ "vxor.vv v2, v14, v15\n" /* v2 = qmul[*q ^ *dq] */
+
+ "vsrl.vi v5, v0, 4\n"
+ "vand.vi v4, v0, 0xf\n"
+ "vle8.v v8, (%[pm0])\n"
+ "vle8.v v9, (%[pm1])\n"
+ "vrgather.vv v14, v8, v4\n" /* v14 = pm[vx] */
+ "vrgather.vv v15, v9, v5\n" /* v15 = pm[vy] */
+ "vxor.vv v4, v14, v15\n" /* v4 = pbmul[px] */
+ "vxor.vv v3, v4, v2\n" /* v3 = db = pbmul[px] ^ qx */
+ "vxor.vv v1, v3, v0\n" /* v1 = db ^ px; */
+ "vse8.v v3, (%[dq])\n"
+ "vse8.v v1, (%[dp])\n"
+ ".option pop\n"
+ : :
+ [px]"r"(p),
+ [dp]"r"(dp),
+ [qx]"r"(q),
+ [dq]"r"(dq),
+ [qm0]"r"(qmul),
+ [qm1]"r"(qmul + 16),
+ [pm0]"r"(pbmul),
+ [pm1]"r"(pbmul + 16)
+ :);
+
+ bytes -= 16;
+ p += 16;
+ q += 16;
+ dp += 16;
+ dq += 16;
+ }
+}
+
+static void __raid6_datap_recov_rvv(int bytes, u8 *p, u8 *q,
+ u8 *dq, const u8 *qmul)
+{
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsetvli x0, %[avl], e8, m1, ta, ma\n"
+ ".option pop\n"
+ : :
+ [avl]"r"(16)
+ );
+
+ /*
+ * while (bytes--) {
+ * *p++ ^= *dq = qmul[*q ^ *dq];
+ * q++; dq++;
+ * }
+ */
+ while (bytes) {
+ /*
+ * v0:vx, v1:vy,
+ * v2:dq, v3:p,
+ * v4:qm0, v5:qm1,
+ * v10:m[vx], v11:m[vy]
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vle8.v v0, (%[vx])\n"
+ "vle8.v v2, (%[dq])\n"
+ "vxor.vv v0, v0, v2\n"
+ "vsrl.vi v1, v0, 4\n"
+ "vand.vi v0, v0, 0xf\n"
+ "vle8.v v4, (%[qm0])\n"
+ "vle8.v v5, (%[qm1])\n"
+ "vrgather.vv v10, v4, v0\n"
+ "vrgather.vv v11, v5, v1\n"
+ "vxor.vv v0, v10, v11\n"
+ "vle8.v v1, (%[vy])\n"
+ "vxor.vv v1, v0, v1\n"
+ "vse8.v v0, (%[dq])\n"
+ "vse8.v v1, (%[vy])\n"
+ ".option pop\n"
+ : :
+ [vx]"r"(q),
+ [vy]"r"(p),
+ [dq]"r"(dq),
+ [qm0]"r"(qmul),
+ [qm1]"r"(qmul + 16)
+ :);
+
+ bytes -= 16;
+ p += 16;
+ q += 16;
+ dq += 16;
+ }
+}
+
+static void raid6_2data_recov_rvv(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks - 2] = p;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_vgfmul[raid6_gfexi[failb - faila]];
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
+ raid6_gfexp[failb]]];
+
+ kernel_vector_begin();
+ __raid6_2data_recov_rvv(bytes, p, q, dp, dq, pbmul, qmul);
+ kernel_vector_end();
+}
+
+static void raid6_datap_recov_rvv(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ kernel_vector_begin();
+ __raid6_datap_recov_rvv(bytes, p, q, dq, qmul);
+ kernel_vector_end();
+}
+
+const struct raid6_recov_calls raid6_recov_rvv = {
+ .data2 = raid6_2data_recov_rvv,
+ .datap = raid6_datap_recov_rvv,
+ .valid = rvv_has_vector,
+ .name = "rvv",
+ .priority = 1,
+};
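
For reference, the vector loops in recov_rvv.c implement the scalar recovery shown in their comments. A restatement of the two-failure case as plain C (illustration only; note that the vector path replaces the byte-indexed table lookup with the two 16-entry nibble halves consumed by vrgather.vv):

/*
 * Scalar reference for the two-data-disk recovery (illustration only).
 * pbmul and qmul are GF(2^8) multiplication tables; here they are taken
 * as byte-indexed, whereas the vector code above splits each into a
 * low-nibble and a high-nibble half (qm0/qm1, pm0/pm1).
 */
static void recov_2data_ref(int bytes, u8 *p, u8 *q, u8 *dp, u8 *dq,
			    const u8 *pbmul, const u8 *qmul)
{
	while (bytes--) {
		u8 px = *p ^ *dp;		/* delta P */
		u8 qx = qmul[*q ^ *dq];		/* delta Q scaled for disk B */
		u8 db = pbmul[px] ^ qx;		/* recovered block for disk B */

		*dq++ = db;
		*dp++ = db ^ px;		/* recovered block for disk A */
		p++;
		q++;
	}
}
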
diff --git a/lib/raid6/rvv.c b/lib/raid6/rvv.c
new file mode 100644
index 000000000000..f0887344b274
--- /dev/null
+++ b/lib/raid6/rvv.c
@@ -0,0 +1,1212 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RAID-6 syndrome calculation using RISC-V vector instructions
+ *
+ * Copyright 2024 Institute of Software, CAS.
+ * Author: Chunyan Zhang <zhangchunyan@iscas.ac.cn>
+ *
+ * Based on neon.uc:
+ * Copyright 2002-2004 H. Peter Anvin
+ */
+
+#include <asm/simd.h>
+#include <asm/vector.h>
+#include <crypto/internal/simd.h>
+#include <linux/raid/pq.h>
+#include <linux/types.h>
+#include "rvv.h"
+
+#define NSIZE (riscv_v_vsize / 32) /* NSIZE = vlenb */
+
+static int rvv_has_vector(void)
+{
+ return has_vector();
+}
+
+static void raid6_rvv1_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ unsigned long d;
+ int z, z0;
+ u8 *p, *q;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
+
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsetvli t0, x0, e8, m1, ta, ma\n"
+ ".option pop\n"
+ );
+
+ /* v0:wp0, v1:wq0, v2:wd0/w20, v3:w10 */
+ for (d = 0; d < bytes; d += NSIZE * 1) {
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vle8.v v0, (%[wp0])\n"
+ "vle8.v v1, (%[wp0])\n"
+ ".option pop\n"
+ : :
+ [wp0]"r"(&dptr[z0][d + 0 * NSIZE])
+ );
+
+ for (z = z0 - 1 ; z >= 0 ; z--) {
+ /*
+ * w2$$ = MASK(wq$$);
+ * w1$$ = SHLBYTE(wq$$);
+ * w2$$ &= NBYTES(0x1d);
+ * w1$$ ^= w2$$;
+ * wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ * wq$$ = w1$$ ^ wd$$;
+ * wp$$ ^= wd$$;
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsra.vi v2, v1, 7\n"
+ "vsll.vi v3, v1, 1\n"
+ "vand.vx v2, v2, %[x1d]\n"
+ "vxor.vv v3, v3, v2\n"
+ "vle8.v v2, (%[wd0])\n"
+ "vxor.vv v1, v3, v2\n"
+ "vxor.vv v0, v0, v2\n"
+ ".option pop\n"
+ : :
+ [wd0]"r"(&dptr[z][d + 0 * NSIZE]),
+ [x1d]"r"(0x1d)
+ );
+ }
+
+ /*
+ * *(unative_t *)&p[d+NSIZE*$$] = wp$$;
+ * *(unative_t *)&q[d+NSIZE*$$] = wq$$;
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vse8.v v0, (%[wp0])\n"
+ "vse8.v v1, (%[wq0])\n"
+ ".option pop\n"
+ : :
+ [wp0]"r"(&p[d + NSIZE * 0]),
+ [wq0]"r"(&q[d + NSIZE * 0])
+ );
+ }
+}
+
+static void raid6_rvv1_xor_syndrome_real(int disks, int start, int stop,
+ unsigned long bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ unsigned long d;
+ int z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
+
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsetvli t0, x0, e8, m1, ta, ma\n"
+ ".option pop\n"
+ );
+
+ /* v0:wp0, v1:wq0, v2:wd0/w20, v3:w10 */
+ for (d = 0 ; d < bytes ; d += NSIZE * 1) {
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vle8.v v0, (%[wp0])\n"
+ "vle8.v v1, (%[wp0])\n"
+ ".option pop\n"
+ : :
+ [wp0]"r"(&dptr[z0][d + 0 * NSIZE])
+ );
+
+ /* P/Q data pages */
+ for (z = z0 - 1; z >= start; z--) {
+ /*
+ * w2$$ = MASK(wq$$);
+ * w1$$ = SHLBYTE(wq$$);
+ * w2$$ &= NBYTES(0x1d);
+ * w1$$ ^= w2$$;
+ * wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ * wq$$ = w1$$ ^ wd$$;
+ * wp$$ ^= wd$$;
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsra.vi v2, v1, 7\n"
+ "vsll.vi v3, v1, 1\n"
+ "vand.vx v2, v2, %[x1d]\n"
+ "vxor.vv v3, v3, v2\n"
+ "vle8.v v2, (%[wd0])\n"
+ "vxor.vv v1, v3, v2\n"
+ "vxor.vv v0, v0, v2\n"
+ ".option pop\n"
+ : :
+ [wd0]"r"(&dptr[z][d + 0 * NSIZE]),
+ [x1d]"r"(0x1d)
+ );
+ }
+
+ /* P/Q left side optimization */
+ for (z = start - 1; z >= 0; z--) {
+ /*
+ * w2$$ = MASK(wq$$);
+ * w1$$ = SHLBYTE(wq$$);
+ * w2$$ &= NBYTES(0x1d);
+ * wq$$ = w1$$ ^ w2$$;
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsra.vi v2, v1, 7\n"
+ "vsll.vi v3, v1, 1\n"
+ "vand.vx v2, v2, %[x1d]\n"
+ "vxor.vv v1, v3, v2\n"
+ ".option pop\n"
+ : :
+ [x1d]"r"(0x1d)
+ );
+ }
+
+ /*
+ * *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
+ * *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
+ * v0:wp0, v1:wq0, v2:p0, v3:q0
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vle8.v v2, (%[wp0])\n"
+ "vle8.v v3, (%[wq0])\n"
+ "vxor.vv v2, v2, v0\n"
+ "vxor.vv v3, v3, v1\n"
+ "vse8.v v2, (%[wp0])\n"
+ "vse8.v v3, (%[wq0])\n"
+ ".option pop\n"
+ : :
+ [wp0]"r"(&p[d + NSIZE * 0]),
+ [wq0]"r"(&q[d + NSIZE * 0])
+ );
+ }
+}
+
+static void raid6_rvv2_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ unsigned long d;
+ int z, z0;
+ u8 *p, *q;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
+
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsetvli t0, x0, e8, m1, ta, ma\n"
+ ".option pop\n"
+ );
+
+ /*
+ * v0:wp0, v1:wq0, v2:wd0/w20, v3:w10
+ * v4:wp1, v5:wq1, v6:wd1/w21, v7:w11
+ */
+ for (d = 0; d < bytes; d += NSIZE * 2) {
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vle8.v v0, (%[wp0])\n"
+ "vle8.v v1, (%[wp0])\n"
+ "vle8.v v4, (%[wp1])\n"
+ "vle8.v v5, (%[wp1])\n"
+ ".option pop\n"
+ : :
+ [wp0]"r"(&dptr[z0][d + 0 * NSIZE]),
+ [wp1]"r"(&dptr[z0][d + 1 * NSIZE])
+ );
+
+ for (z = z0 - 1; z >= 0; z--) {
+ /*
+ * w2$$ = MASK(wq$$);
+ * w1$$ = SHLBYTE(wq$$);
+ * w2$$ &= NBYTES(0x1d);
+ * w1$$ ^= w2$$;
+ * wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ * wq$$ = w1$$ ^ wd$$;
+ * wp$$ ^= wd$$;
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsra.vi v2, v1, 7\n"
+ "vsll.vi v3, v1, 1\n"
+ "vand.vx v2, v2, %[x1d]\n"
+ "vxor.vv v3, v3, v2\n"
+ "vle8.v v2, (%[wd0])\n"
+ "vxor.vv v1, v3, v2\n"
+ "vxor.vv v0, v0, v2\n"
+
+ "vsra.vi v6, v5, 7\n"
+ "vsll.vi v7, v5, 1\n"
+ "vand.vx v6, v6, %[x1d]\n"
+ "vxor.vv v7, v7, v6\n"
+ "vle8.v v6, (%[wd1])\n"
+ "vxor.vv v5, v7, v6\n"
+ "vxor.vv v4, v4, v6\n"
+ ".option pop\n"
+ : :
+ [wd0]"r"(&dptr[z][d + 0 * NSIZE]),
+ [wd1]"r"(&dptr[z][d + 1 * NSIZE]),
+ [x1d]"r"(0x1d)
+ );
+ }
+
+ /*
+ * *(unative_t *)&p[d+NSIZE*$$] = wp$$;
+ * *(unative_t *)&q[d+NSIZE*$$] = wq$$;
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vse8.v v0, (%[wp0])\n"
+ "vse8.v v1, (%[wq0])\n"
+ "vse8.v v4, (%[wp1])\n"
+ "vse8.v v5, (%[wq1])\n"
+ ".option pop\n"
+ : :
+ [wp0]"r"(&p[d + NSIZE * 0]),
+ [wq0]"r"(&q[d + NSIZE * 0]),
+ [wp1]"r"(&p[d + NSIZE * 1]),
+ [wq1]"r"(&q[d + NSIZE * 1])
+ );
+ }
+}
+
+static void raid6_rvv2_xor_syndrome_real(int disks, int start, int stop,
+ unsigned long bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ unsigned long d;
+ int z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
+
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsetvli t0, x0, e8, m1, ta, ma\n"
+ ".option pop\n"
+ );
+
+ /*
+ * v0:wp0, v1:wq0, v2:wd0/w20, v3:w10
+ * v4:wp1, v5:wq1, v6:wd1/w21, v7:w11
+ */
+ for (d = 0; d < bytes; d += NSIZE * 2) {
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vle8.v v0, (%[wp0])\n"
+ "vle8.v v1, (%[wp0])\n"
+ "vle8.v v4, (%[wp1])\n"
+ "vle8.v v5, (%[wp1])\n"
+ ".option pop\n"
+ : :
+ [wp0]"r"(&dptr[z0][d + 0 * NSIZE]),
+ [wp1]"r"(&dptr[z0][d + 1 * NSIZE])
+ );
+
+ /* P/Q data pages */
+ for (z = z0 - 1; z >= start; z--) {
+ /*
+ * w2$$ = MASK(wq$$);
+ * w1$$ = SHLBYTE(wq$$);
+ * w2$$ &= NBYTES(0x1d);
+ * w1$$ ^= w2$$;
+ * wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ * wq$$ = w1$$ ^ wd$$;
+ * wp$$ ^= wd$$;
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsra.vi v2, v1, 7\n"
+ "vsll.vi v3, v1, 1\n"
+ "vand.vx v2, v2, %[x1d]\n"
+ "vxor.vv v3, v3, v2\n"
+ "vle8.v v2, (%[wd0])\n"
+ "vxor.vv v1, v3, v2\n"
+ "vxor.vv v0, v0, v2\n"
+
+ "vsra.vi v6, v5, 7\n"
+ "vsll.vi v7, v5, 1\n"
+ "vand.vx v6, v6, %[x1d]\n"
+ "vxor.vv v7, v7, v6\n"
+ "vle8.v v6, (%[wd1])\n"
+ "vxor.vv v5, v7, v6\n"
+ "vxor.vv v4, v4, v6\n"
+ ".option pop\n"
+ : :
+ [wd0]"r"(&dptr[z][d + 0 * NSIZE]),
+ [wd1]"r"(&dptr[z][d + 1 * NSIZE]),
+ [x1d]"r"(0x1d)
+ );
+ }
+
+ /* P/Q left side optimization */
+ for (z = start - 1; z >= 0; z--) {
+ /*
+ * w2$$ = MASK(wq$$);
+ * w1$$ = SHLBYTE(wq$$);
+ * w2$$ &= NBYTES(0x1d);
+ * wq$$ = w1$$ ^ w2$$;
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsra.vi v2, v1, 7\n"
+ "vsll.vi v3, v1, 1\n"
+ "vand.vx v2, v2, %[x1d]\n"
+ "vxor.vv v1, v3, v2\n"
+
+ "vsra.vi v6, v5, 7\n"
+ "vsll.vi v7, v5, 1\n"
+ "vand.vx v6, v6, %[x1d]\n"
+ "vxor.vv v5, v7, v6\n"
+ ".option pop\n"
+ : :
+ [x1d]"r"(0x1d)
+ );
+ }
+
+ /*
+ * *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
+ * *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
+ * v0:wp0, v1:wq0, v2:p0, v3:q0
+ * v4:wp1, v5:wq1, v6:p1, v7:q1
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vle8.v v2, (%[wp0])\n"
+ "vle8.v v3, (%[wq0])\n"
+ "vxor.vv v2, v2, v0\n"
+ "vxor.vv v3, v3, v1\n"
+ "vse8.v v2, (%[wp0])\n"
+ "vse8.v v3, (%[wq0])\n"
+
+ "vle8.v v6, (%[wp1])\n"
+ "vle8.v v7, (%[wq1])\n"
+ "vxor.vv v6, v6, v4\n"
+ "vxor.vv v7, v7, v5\n"
+ "vse8.v v6, (%[wp1])\n"
+ "vse8.v v7, (%[wq1])\n"
+ ".option pop\n"
+ : :
+ [wp0]"r"(&p[d + NSIZE * 0]),
+ [wq0]"r"(&q[d + NSIZE * 0]),
+ [wp1]"r"(&p[d + NSIZE * 1]),
+ [wq1]"r"(&q[d + NSIZE * 1])
+ );
+ }
+}
+
+static void raid6_rvv4_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ unsigned long d;
+ int z, z0;
+ u8 *p, *q;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
+
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsetvli t0, x0, e8, m1, ta, ma\n"
+ ".option pop\n"
+ );
+
+ /*
+ * v0:wp0, v1:wq0, v2:wd0/w20, v3:w10
+ * v4:wp1, v5:wq1, v6:wd1/w21, v7:w11
+ * v8:wp2, v9:wq2, v10:wd2/w22, v11:w12
+ * v12:wp3, v13:wq3, v14:wd3/w23, v15:w13
+ */
+ for (d = 0; d < bytes; d += NSIZE * 4) {
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vle8.v v0, (%[wp0])\n"
+ "vle8.v v1, (%[wp0])\n"
+ "vle8.v v4, (%[wp1])\n"
+ "vle8.v v5, (%[wp1])\n"
+ "vle8.v v8, (%[wp2])\n"
+ "vle8.v v9, (%[wp2])\n"
+ "vle8.v v12, (%[wp3])\n"
+ "vle8.v v13, (%[wp3])\n"
+ ".option pop\n"
+ : :
+ [wp0]"r"(&dptr[z0][d + 0 * NSIZE]),
+ [wp1]"r"(&dptr[z0][d + 1 * NSIZE]),
+ [wp2]"r"(&dptr[z0][d + 2 * NSIZE]),
+ [wp3]"r"(&dptr[z0][d + 3 * NSIZE])
+ );
+
+ for (z = z0 - 1; z >= 0; z--) {
+ /*
+ * w2$$ = MASK(wq$$);
+ * w1$$ = SHLBYTE(wq$$);
+ * w2$$ &= NBYTES(0x1d);
+ * w1$$ ^= w2$$;
+ * wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ * wq$$ = w1$$ ^ wd$$;
+ * wp$$ ^= wd$$;
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsra.vi v2, v1, 7\n"
+ "vsll.vi v3, v1, 1\n"
+ "vand.vx v2, v2, %[x1d]\n"
+ "vxor.vv v3, v3, v2\n"
+ "vle8.v v2, (%[wd0])\n"
+ "vxor.vv v1, v3, v2\n"
+ "vxor.vv v0, v0, v2\n"
+
+ "vsra.vi v6, v5, 7\n"
+ "vsll.vi v7, v5, 1\n"
+ "vand.vx v6, v6, %[x1d]\n"
+ "vxor.vv v7, v7, v6\n"
+ "vle8.v v6, (%[wd1])\n"
+ "vxor.vv v5, v7, v6\n"
+ "vxor.vv v4, v4, v6\n"
+
+ "vsra.vi v10, v9, 7\n"
+ "vsll.vi v11, v9, 1\n"
+ "vand.vx v10, v10, %[x1d]\n"
+ "vxor.vv v11, v11, v10\n"
+ "vle8.v v10, (%[wd2])\n"
+ "vxor.vv v9, v11, v10\n"
+ "vxor.vv v8, v8, v10\n"
+
+ "vsra.vi v14, v13, 7\n"
+ "vsll.vi v15, v13, 1\n"
+ "vand.vx v14, v14, %[x1d]\n"
+ "vxor.vv v15, v15, v14\n"
+ "vle8.v v14, (%[wd3])\n"
+ "vxor.vv v13, v15, v14\n"
+ "vxor.vv v12, v12, v14\n"
+ ".option pop\n"
+ : :
+ [wd0]"r"(&dptr[z][d + 0 * NSIZE]),
+ [wd1]"r"(&dptr[z][d + 1 * NSIZE]),
+ [wd2]"r"(&dptr[z][d + 2 * NSIZE]),
+ [wd3]"r"(&dptr[z][d + 3 * NSIZE]),
+ [x1d]"r"(0x1d)
+ );
+ }
+
+ /*
+ * *(unative_t *)&p[d+NSIZE*$$] = wp$$;
+ * *(unative_t *)&q[d+NSIZE*$$] = wq$$;
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vse8.v v0, (%[wp0])\n"
+ "vse8.v v1, (%[wq0])\n"
+ "vse8.v v4, (%[wp1])\n"
+ "vse8.v v5, (%[wq1])\n"
+ "vse8.v v8, (%[wp2])\n"
+ "vse8.v v9, (%[wq2])\n"
+ "vse8.v v12, (%[wp3])\n"
+ "vse8.v v13, (%[wq3])\n"
+ ".option pop\n"
+ : :
+ [wp0]"r"(&p[d + NSIZE * 0]),
+ [wq0]"r"(&q[d + NSIZE * 0]),
+ [wp1]"r"(&p[d + NSIZE * 1]),
+ [wq1]"r"(&q[d + NSIZE * 1]),
+ [wp2]"r"(&p[d + NSIZE * 2]),
+ [wq2]"r"(&q[d + NSIZE * 2]),
+ [wp3]"r"(&p[d + NSIZE * 3]),
+ [wq3]"r"(&q[d + NSIZE * 3])
+ );
+ }
+}
+
+static void raid6_rvv4_xor_syndrome_real(int disks, int start, int stop,
+ unsigned long bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ unsigned long d;
+ int z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
+
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsetvli t0, x0, e8, m1, ta, ma\n"
+ ".option pop\n"
+ );
+
+ /*
+ * v0:wp0, v1:wq0, v2:wd0/w20, v3:w10
+ * v4:wp1, v5:wq1, v6:wd1/w21, v7:w11
+ * v8:wp2, v9:wq2, v10:wd2/w22, v11:w12
+ * v12:wp3, v13:wq3, v14:wd3/w23, v15:w13
+ */
+ for (d = 0; d < bytes; d += NSIZE * 4) {
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vle8.v v0, (%[wp0])\n"
+ "vle8.v v1, (%[wp0])\n"
+ "vle8.v v4, (%[wp1])\n"
+ "vle8.v v5, (%[wp1])\n"
+ "vle8.v v8, (%[wp2])\n"
+ "vle8.v v9, (%[wp2])\n"
+ "vle8.v v12, (%[wp3])\n"
+ "vle8.v v13, (%[wp3])\n"
+ ".option pop\n"
+ : :
+ [wp0]"r"(&dptr[z0][d + 0 * NSIZE]),
+ [wp1]"r"(&dptr[z0][d + 1 * NSIZE]),
+ [wp2]"r"(&dptr[z0][d + 2 * NSIZE]),
+ [wp3]"r"(&dptr[z0][d + 3 * NSIZE])
+ );
+
+ /* P/Q data pages */
+ for (z = z0 - 1; z >= start; z--) {
+ /*
+ * w2$$ = MASK(wq$$);
+ * w1$$ = SHLBYTE(wq$$);
+ * w2$$ &= NBYTES(0x1d);
+ * w1$$ ^= w2$$;
+ * wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ * wq$$ = w1$$ ^ wd$$;
+ * wp$$ ^= wd$$;
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsra.vi v2, v1, 7\n"
+ "vsll.vi v3, v1, 1\n"
+ "vand.vx v2, v2, %[x1d]\n"
+ "vxor.vv v3, v3, v2\n"
+ "vle8.v v2, (%[wd0])\n"
+ "vxor.vv v1, v3, v2\n"
+ "vxor.vv v0, v0, v2\n"
+
+ "vsra.vi v6, v5, 7\n"
+ "vsll.vi v7, v5, 1\n"
+ "vand.vx v6, v6, %[x1d]\n"
+ "vxor.vv v7, v7, v6\n"
+ "vle8.v v6, (%[wd1])\n"
+ "vxor.vv v5, v7, v6\n"
+ "vxor.vv v4, v4, v6\n"
+
+ "vsra.vi v10, v9, 7\n"
+ "vsll.vi v11, v9, 1\n"
+ "vand.vx v10, v10, %[x1d]\n"
+ "vxor.vv v11, v11, v10\n"
+ "vle8.v v10, (%[wd2])\n"
+ "vxor.vv v9, v11, v10\n"
+ "vxor.vv v8, v8, v10\n"
+
+ "vsra.vi v14, v13, 7\n"
+ "vsll.vi v15, v13, 1\n"
+ "vand.vx v14, v14, %[x1d]\n"
+ "vxor.vv v15, v15, v14\n"
+ "vle8.v v14, (%[wd3])\n"
+ "vxor.vv v13, v15, v14\n"
+ "vxor.vv v12, v12, v14\n"
+ ".option pop\n"
+ : :
+ [wd0]"r"(&dptr[z][d + 0 * NSIZE]),
+ [wd1]"r"(&dptr[z][d + 1 * NSIZE]),
+ [wd2]"r"(&dptr[z][d + 2 * NSIZE]),
+ [wd3]"r"(&dptr[z][d + 3 * NSIZE]),
+ [x1d]"r"(0x1d)
+ );
+ }
+
+ /* P/Q left side optimization */
+ for (z = start - 1; z >= 0; z--) {
+ /*
+ * w2$$ = MASK(wq$$);
+ * w1$$ = SHLBYTE(wq$$);
+ * w2$$ &= NBYTES(0x1d);
+ * wq$$ = w1$$ ^ w2$$;
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsra.vi v2, v1, 7\n"
+ "vsll.vi v3, v1, 1\n"
+ "vand.vx v2, v2, %[x1d]\n"
+ "vxor.vv v1, v3, v2\n"
+
+ "vsra.vi v6, v5, 7\n"
+ "vsll.vi v7, v5, 1\n"
+ "vand.vx v6, v6, %[x1d]\n"
+ "vxor.vv v5, v7, v6\n"
+
+ "vsra.vi v10, v9, 7\n"
+ "vsll.vi v11, v9, 1\n"
+ "vand.vx v10, v10, %[x1d]\n"
+ "vxor.vv v9, v11, v10\n"
+
+ "vsra.vi v14, v13, 7\n"
+ "vsll.vi v15, v13, 1\n"
+ "vand.vx v14, v14, %[x1d]\n"
+ "vxor.vv v13, v15, v14\n"
+ ".option pop\n"
+ : :
+ [x1d]"r"(0x1d)
+ );
+ }
+
+ /*
+ * *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
+ * *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
+ * v0:wp0, v1:wq0, v2:p0, v3:q0
+ * v4:wp1, v5:wq1, v6:p1, v7:q1
+ * v8:wp2, v9:wq2, v10:p2, v11:q2
+ * v12:wp3, v13:wq3, v14:p3, v15:q3
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vle8.v v2, (%[wp0])\n"
+ "vle8.v v3, (%[wq0])\n"
+ "vxor.vv v2, v2, v0\n"
+ "vxor.vv v3, v3, v1\n"
+ "vse8.v v2, (%[wp0])\n"
+ "vse8.v v3, (%[wq0])\n"
+
+ "vle8.v v6, (%[wp1])\n"
+ "vle8.v v7, (%[wq1])\n"
+ "vxor.vv v6, v6, v4\n"
+ "vxor.vv v7, v7, v5\n"
+ "vse8.v v6, (%[wp1])\n"
+ "vse8.v v7, (%[wq1])\n"
+
+ "vle8.v v10, (%[wp2])\n"
+ "vle8.v v11, (%[wq2])\n"
+ "vxor.vv v10, v10, v8\n"
+ "vxor.vv v11, v11, v9\n"
+ "vse8.v v10, (%[wp2])\n"
+ "vse8.v v11, (%[wq2])\n"
+
+ "vle8.v v14, (%[wp3])\n"
+ "vle8.v v15, (%[wq3])\n"
+ "vxor.vv v14, v14, v12\n"
+ "vxor.vv v15, v15, v13\n"
+ "vse8.v v14, (%[wp3])\n"
+ "vse8.v v15, (%[wq3])\n"
+ ".option pop\n"
+ : :
+ [wp0]"r"(&p[d + NSIZE * 0]),
+ [wq0]"r"(&q[d + NSIZE * 0]),
+ [wp1]"r"(&p[d + NSIZE * 1]),
+ [wq1]"r"(&q[d + NSIZE * 1]),
+ [wp2]"r"(&p[d + NSIZE * 2]),
+ [wq2]"r"(&q[d + NSIZE * 2]),
+ [wp3]"r"(&p[d + NSIZE * 3]),
+ [wq3]"r"(&q[d + NSIZE * 3])
+ );
+ }
+}
+
+static void raid6_rvv8_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ unsigned long d;
+ int z, z0;
+ u8 *p, *q;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
+
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsetvli t0, x0, e8, m1, ta, ma\n"
+ ".option pop\n"
+ );
+
+ /*
+ * v0:wp0, v1:wq0, v2:wd0/w20, v3:w10
+ * v4:wp1, v5:wq1, v6:wd1/w21, v7:w11
+ * v8:wp2, v9:wq2, v10:wd2/w22, v11:w12
+ * v12:wp3, v13:wq3, v14:wd3/w23, v15:w13
+ * v16:wp4, v17:wq4, v18:wd4/w24, v19:w14
+ * v20:wp5, v21:wq5, v22:wd5/w25, v23:w15
+ * v24:wp6, v25:wq6, v26:wd6/w26, v27:w16
+ * v28:wp7, v29:wq7, v30:wd7/w27, v31:w17
+ */
+ for (d = 0; d < bytes; d += NSIZE * 8) {
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vle8.v v0, (%[wp0])\n"
+ "vle8.v v1, (%[wp0])\n"
+ "vle8.v v4, (%[wp1])\n"
+ "vle8.v v5, (%[wp1])\n"
+ "vle8.v v8, (%[wp2])\n"
+ "vle8.v v9, (%[wp2])\n"
+ "vle8.v v12, (%[wp3])\n"
+ "vle8.v v13, (%[wp3])\n"
+ "vle8.v v16, (%[wp4])\n"
+ "vle8.v v17, (%[wp4])\n"
+ "vle8.v v20, (%[wp5])\n"
+ "vle8.v v21, (%[wp5])\n"
+ "vle8.v v24, (%[wp6])\n"
+ "vle8.v v25, (%[wp6])\n"
+ "vle8.v v28, (%[wp7])\n"
+ "vle8.v v29, (%[wp7])\n"
+ ".option pop\n"
+ : :
+ [wp0]"r"(&dptr[z0][d + 0 * NSIZE]),
+ [wp1]"r"(&dptr[z0][d + 1 * NSIZE]),
+ [wp2]"r"(&dptr[z0][d + 2 * NSIZE]),
+ [wp3]"r"(&dptr[z0][d + 3 * NSIZE]),
+ [wp4]"r"(&dptr[z0][d + 4 * NSIZE]),
+ [wp5]"r"(&dptr[z0][d + 5 * NSIZE]),
+ [wp6]"r"(&dptr[z0][d + 6 * NSIZE]),
+ [wp7]"r"(&dptr[z0][d + 7 * NSIZE])
+ );
+
+ for (z = z0 - 1; z >= 0; z--) {
+ /*
+ * w2$$ = MASK(wq$$);
+ * w1$$ = SHLBYTE(wq$$);
+ * w2$$ &= NBYTES(0x1d);
+ * w1$$ ^= w2$$;
+ * wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ * wq$$ = w1$$ ^ wd$$;
+ * wp$$ ^= wd$$;
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsra.vi v2, v1, 7\n"
+ "vsll.vi v3, v1, 1\n"
+ "vand.vx v2, v2, %[x1d]\n"
+ "vxor.vv v3, v3, v2\n"
+ "vle8.v v2, (%[wd0])\n"
+ "vxor.vv v1, v3, v2\n"
+ "vxor.vv v0, v0, v2\n"
+
+ "vsra.vi v6, v5, 7\n"
+ "vsll.vi v7, v5, 1\n"
+ "vand.vx v6, v6, %[x1d]\n"
+ "vxor.vv v7, v7, v6\n"
+ "vle8.v v6, (%[wd1])\n"
+ "vxor.vv v5, v7, v6\n"
+ "vxor.vv v4, v4, v6\n"
+
+ "vsra.vi v10, v9, 7\n"
+ "vsll.vi v11, v9, 1\n"
+ "vand.vx v10, v10, %[x1d]\n"
+ "vxor.vv v11, v11, v10\n"
+ "vle8.v v10, (%[wd2])\n"
+ "vxor.vv v9, v11, v10\n"
+ "vxor.vv v8, v8, v10\n"
+
+ "vsra.vi v14, v13, 7\n"
+ "vsll.vi v15, v13, 1\n"
+ "vand.vx v14, v14, %[x1d]\n"
+ "vxor.vv v15, v15, v14\n"
+ "vle8.v v14, (%[wd3])\n"
+ "vxor.vv v13, v15, v14\n"
+ "vxor.vv v12, v12, v14\n"
+
+ "vsra.vi v18, v17, 7\n"
+ "vsll.vi v19, v17, 1\n"
+ "vand.vx v18, v18, %[x1d]\n"
+ "vxor.vv v19, v19, v18\n"
+ "vle8.v v18, (%[wd4])\n"
+ "vxor.vv v17, v19, v18\n"
+ "vxor.vv v16, v16, v18\n"
+
+ "vsra.vi v22, v21, 7\n"
+ "vsll.vi v23, v21, 1\n"
+ "vand.vx v22, v22, %[x1d]\n"
+ "vxor.vv v23, v23, v22\n"
+ "vle8.v v22, (%[wd5])\n"
+ "vxor.vv v21, v23, v22\n"
+ "vxor.vv v20, v20, v22\n"
+
+ "vsra.vi v26, v25, 7\n"
+ "vsll.vi v27, v25, 1\n"
+ "vand.vx v26, v26, %[x1d]\n"
+ "vxor.vv v27, v27, v26\n"
+ "vle8.v v26, (%[wd6])\n"
+ "vxor.vv v25, v27, v26\n"
+ "vxor.vv v24, v24, v26\n"
+
+ "vsra.vi v30, v29, 7\n"
+ "vsll.vi v31, v29, 1\n"
+ "vand.vx v30, v30, %[x1d]\n"
+ "vxor.vv v31, v31, v30\n"
+ "vle8.v v30, (%[wd7])\n"
+ "vxor.vv v29, v31, v30\n"
+ "vxor.vv v28, v28, v30\n"
+ ".option pop\n"
+ : :
+ [wd0]"r"(&dptr[z][d + 0 * NSIZE]),
+ [wd1]"r"(&dptr[z][d + 1 * NSIZE]),
+ [wd2]"r"(&dptr[z][d + 2 * NSIZE]),
+ [wd3]"r"(&dptr[z][d + 3 * NSIZE]),
+ [wd4]"r"(&dptr[z][d + 4 * NSIZE]),
+ [wd5]"r"(&dptr[z][d + 5 * NSIZE]),
+ [wd6]"r"(&dptr[z][d + 6 * NSIZE]),
+ [wd7]"r"(&dptr[z][d + 7 * NSIZE]),
+ [x1d]"r"(0x1d)
+ );
+ }
+
+ /*
+ * *(unative_t *)&p[d+NSIZE*$$] = wp$$;
+ * *(unative_t *)&q[d+NSIZE*$$] = wq$$;
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vse8.v v0, (%[wp0])\n"
+ "vse8.v v1, (%[wq0])\n"
+ "vse8.v v4, (%[wp1])\n"
+ "vse8.v v5, (%[wq1])\n"
+ "vse8.v v8, (%[wp2])\n"
+ "vse8.v v9, (%[wq2])\n"
+ "vse8.v v12, (%[wp3])\n"
+ "vse8.v v13, (%[wq3])\n"
+ "vse8.v v16, (%[wp4])\n"
+ "vse8.v v17, (%[wq4])\n"
+ "vse8.v v20, (%[wp5])\n"
+ "vse8.v v21, (%[wq5])\n"
+ "vse8.v v24, (%[wp6])\n"
+ "vse8.v v25, (%[wq6])\n"
+ "vse8.v v28, (%[wp7])\n"
+ "vse8.v v29, (%[wq7])\n"
+ ".option pop\n"
+ : :
+ [wp0]"r"(&p[d + NSIZE * 0]),
+ [wq0]"r"(&q[d + NSIZE * 0]),
+ [wp1]"r"(&p[d + NSIZE * 1]),
+ [wq1]"r"(&q[d + NSIZE * 1]),
+ [wp2]"r"(&p[d + NSIZE * 2]),
+ [wq2]"r"(&q[d + NSIZE * 2]),
+ [wp3]"r"(&p[d + NSIZE * 3]),
+ [wq3]"r"(&q[d + NSIZE * 3]),
+ [wp4]"r"(&p[d + NSIZE * 4]),
+ [wq4]"r"(&q[d + NSIZE * 4]),
+ [wp5]"r"(&p[d + NSIZE * 5]),
+ [wq5]"r"(&q[d + NSIZE * 5]),
+ [wp6]"r"(&p[d + NSIZE * 6]),
+ [wq6]"r"(&q[d + NSIZE * 6]),
+ [wp7]"r"(&p[d + NSIZE * 7]),
+ [wq7]"r"(&q[d + NSIZE * 7])
+ );
+ }
+}
+
+static void raid6_rvv8_xor_syndrome_real(int disks, int start, int stop,
+ unsigned long bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ unsigned long d;
+ int z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
+
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsetvli t0, x0, e8, m1, ta, ma\n"
+ ".option pop\n"
+ );
+
+ /*
+ * v0:wp0, v1:wq0, v2:wd0/w20, v3:w10
+ * v4:wp1, v5:wq1, v6:wd1/w21, v7:w11
+ * v8:wp2, v9:wq2, v10:wd2/w22, v11:w12
+ * v12:wp3, v13:wq3, v14:wd3/w23, v15:w13
+ * v16:wp4, v17:wq4, v18:wd4/w24, v19:w14
+ * v20:wp5, v21:wq5, v22:wd5/w25, v23:w15
+ * v24:wp6, v25:wq6, v26:wd6/w26, v27:w16
+ * v28:wp7, v29:wq7, v30:wd7/w27, v31:w17
+ */
+ for (d = 0; d < bytes; d += NSIZE * 8) {
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vle8.v v0, (%[wp0])\n"
+ "vle8.v v1, (%[wp0])\n"
+ "vle8.v v4, (%[wp1])\n"
+ "vle8.v v5, (%[wp1])\n"
+ "vle8.v v8, (%[wp2])\n"
+ "vle8.v v9, (%[wp2])\n"
+ "vle8.v v12, (%[wp3])\n"
+ "vle8.v v13, (%[wp3])\n"
+ "vle8.v v16, (%[wp4])\n"
+ "vle8.v v17, (%[wp4])\n"
+ "vle8.v v20, (%[wp5])\n"
+ "vle8.v v21, (%[wp5])\n"
+ "vle8.v v24, (%[wp6])\n"
+ "vle8.v v25, (%[wp6])\n"
+ "vle8.v v28, (%[wp7])\n"
+ "vle8.v v29, (%[wp7])\n"
+ ".option pop\n"
+ : :
+ [wp0]"r"(&dptr[z0][d + 0 * NSIZE]),
+ [wp1]"r"(&dptr[z0][d + 1 * NSIZE]),
+ [wp2]"r"(&dptr[z0][d + 2 * NSIZE]),
+ [wp3]"r"(&dptr[z0][d + 3 * NSIZE]),
+ [wp4]"r"(&dptr[z0][d + 4 * NSIZE]),
+ [wp5]"r"(&dptr[z0][d + 5 * NSIZE]),
+ [wp6]"r"(&dptr[z0][d + 6 * NSIZE]),
+ [wp7]"r"(&dptr[z0][d + 7 * NSIZE])
+ );
+
+ /* P/Q data pages */
+ for (z = z0 - 1; z >= start; z--) {
+ /*
+ * w2$$ = MASK(wq$$);
+ * w1$$ = SHLBYTE(wq$$);
+ * w2$$ &= NBYTES(0x1d);
+ * w1$$ ^= w2$$;
+ * wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ * wq$$ = w1$$ ^ wd$$;
+ * wp$$ ^= wd$$;
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsra.vi v2, v1, 7\n"
+ "vsll.vi v3, v1, 1\n"
+ "vand.vx v2, v2, %[x1d]\n"
+ "vxor.vv v3, v3, v2\n"
+ "vle8.v v2, (%[wd0])\n"
+ "vxor.vv v1, v3, v2\n"
+ "vxor.vv v0, v0, v2\n"
+
+ "vsra.vi v6, v5, 7\n"
+ "vsll.vi v7, v5, 1\n"
+ "vand.vx v6, v6, %[x1d]\n"
+ "vxor.vv v7, v7, v6\n"
+ "vle8.v v6, (%[wd1])\n"
+ "vxor.vv v5, v7, v6\n"
+ "vxor.vv v4, v4, v6\n"
+
+ "vsra.vi v10, v9, 7\n"
+ "vsll.vi v11, v9, 1\n"
+ "vand.vx v10, v10, %[x1d]\n"
+ "vxor.vv v11, v11, v10\n"
+ "vle8.v v10, (%[wd2])\n"
+ "vxor.vv v9, v11, v10\n"
+ "vxor.vv v8, v8, v10\n"
+
+ "vsra.vi v14, v13, 7\n"
+ "vsll.vi v15, v13, 1\n"
+ "vand.vx v14, v14, %[x1d]\n"
+ "vxor.vv v15, v15, v14\n"
+ "vle8.v v14, (%[wd3])\n"
+ "vxor.vv v13, v15, v14\n"
+ "vxor.vv v12, v12, v14\n"
+
+ "vsra.vi v18, v17, 7\n"
+ "vsll.vi v19, v17, 1\n"
+ "vand.vx v18, v18, %[x1d]\n"
+ "vxor.vv v19, v19, v18\n"
+ "vle8.v v18, (%[wd4])\n"
+ "vxor.vv v17, v19, v18\n"
+ "vxor.vv v16, v16, v18\n"
+
+ "vsra.vi v22, v21, 7\n"
+ "vsll.vi v23, v21, 1\n"
+ "vand.vx v22, v22, %[x1d]\n"
+ "vxor.vv v23, v23, v22\n"
+ "vle8.v v22, (%[wd5])\n"
+ "vxor.vv v21, v23, v22\n"
+ "vxor.vv v20, v20, v22\n"
+
+ "vsra.vi v26, v25, 7\n"
+ "vsll.vi v27, v25, 1\n"
+ "vand.vx v26, v26, %[x1d]\n"
+ "vxor.vv v27, v27, v26\n"
+ "vle8.v v26, (%[wd6])\n"
+ "vxor.vv v25, v27, v26\n"
+ "vxor.vv v24, v24, v26\n"
+
+ "vsra.vi v30, v29, 7\n"
+ "vsll.vi v31, v29, 1\n"
+ "vand.vx v30, v30, %[x1d]\n"
+ "vxor.vv v31, v31, v30\n"
+ "vle8.v v30, (%[wd7])\n"
+ "vxor.vv v29, v31, v30\n"
+ "vxor.vv v28, v28, v30\n"
+ ".option pop\n"
+ : :
+ [wd0]"r"(&dptr[z][d + 0 * NSIZE]),
+ [wd1]"r"(&dptr[z][d + 1 * NSIZE]),
+ [wd2]"r"(&dptr[z][d + 2 * NSIZE]),
+ [wd3]"r"(&dptr[z][d + 3 * NSIZE]),
+ [wd4]"r"(&dptr[z][d + 4 * NSIZE]),
+ [wd5]"r"(&dptr[z][d + 5 * NSIZE]),
+ [wd6]"r"(&dptr[z][d + 6 * NSIZE]),
+ [wd7]"r"(&dptr[z][d + 7 * NSIZE]),
+ [x1d]"r"(0x1d)
+ );
+ }
+
+ /* P/Q left side optimization */
+ for (z = start - 1; z >= 0; z--) {
+ /*
+ * w2$$ = MASK(wq$$);
+ * w1$$ = SHLBYTE(wq$$);
+ * w2$$ &= NBYTES(0x1d);
+ * wq$$ = w1$$ ^ w2$$;
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vsra.vi v2, v1, 7\n"
+ "vsll.vi v3, v1, 1\n"
+ "vand.vx v2, v2, %[x1d]\n"
+ "vxor.vv v1, v3, v2\n"
+
+ "vsra.vi v6, v5, 7\n"
+ "vsll.vi v7, v5, 1\n"
+ "vand.vx v6, v6, %[x1d]\n"
+ "vxor.vv v5, v7, v6\n"
+
+ "vsra.vi v10, v9, 7\n"
+ "vsll.vi v11, v9, 1\n"
+ "vand.vx v10, v10, %[x1d]\n"
+ "vxor.vv v9, v11, v10\n"
+
+ "vsra.vi v14, v13, 7\n"
+ "vsll.vi v15, v13, 1\n"
+ "vand.vx v14, v14, %[x1d]\n"
+ "vxor.vv v13, v15, v14\n"
+
+ "vsra.vi v18, v17, 7\n"
+ "vsll.vi v19, v17, 1\n"
+ "vand.vx v18, v18, %[x1d]\n"
+ "vxor.vv v17, v19, v18\n"
+
+ "vsra.vi v22, v21, 7\n"
+ "vsll.vi v23, v21, 1\n"
+ "vand.vx v22, v22, %[x1d]\n"
+ "vxor.vv v21, v23, v22\n"
+
+ "vsra.vi v26, v25, 7\n"
+ "vsll.vi v27, v25, 1\n"
+ "vand.vx v26, v26, %[x1d]\n"
+ "vxor.vv v25, v27, v26\n"
+
+ "vsra.vi v30, v29, 7\n"
+ "vsll.vi v31, v29, 1\n"
+ "vand.vx v30, v30, %[x1d]\n"
+ "vxor.vv v29, v31, v30\n"
+ ".option pop\n"
+ : :
+ [x1d]"r"(0x1d)
+ );
+ }
+
+ /*
+ * *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
+ * *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
+ * v0:wp0, v1:wq0, v2:p0, v3:q0
+ * v4:wp1, v5:wq1, v6:p1, v7:q1
+ * v8:wp2, v9:wq2, v10:p2, v11:q2
+ * v12:wp3, v13:wq3, v14:p3, v15:q3
+ * v16:wp4, v17:wq4, v18:p4, v19:q4
+ * v20:wp5, v21:wq5, v22:p5, v23:q5
+ * v24:wp6, v25:wq6, v26:p6, v27:q6
+ * v28:wp7, v29:wq7, v30:p7, v31:q7
+ */
+ asm volatile (".option push\n"
+ ".option arch,+v\n"
+ "vle8.v v2, (%[wp0])\n"
+ "vle8.v v3, (%[wq0])\n"
+ "vxor.vv v2, v2, v0\n"
+ "vxor.vv v3, v3, v1\n"
+ "vse8.v v2, (%[wp0])\n"
+ "vse8.v v3, (%[wq0])\n"
+
+ "vle8.v v6, (%[wp1])\n"
+ "vle8.v v7, (%[wq1])\n"
+ "vxor.vv v6, v6, v4\n"
+ "vxor.vv v7, v7, v5\n"
+ "vse8.v v6, (%[wp1])\n"
+ "vse8.v v7, (%[wq1])\n"
+
+ "vle8.v v10, (%[wp2])\n"
+ "vle8.v v11, (%[wq2])\n"
+ "vxor.vv v10, v10, v8\n"
+ "vxor.vv v11, v11, v9\n"
+ "vse8.v v10, (%[wp2])\n"
+ "vse8.v v11, (%[wq2])\n"
+
+ "vle8.v v14, (%[wp3])\n"
+ "vle8.v v15, (%[wq3])\n"
+ "vxor.vv v14, v14, v12\n"
+ "vxor.vv v15, v15, v13\n"
+ "vse8.v v14, (%[wp3])\n"
+ "vse8.v v15, (%[wq3])\n"
+
+ "vle8.v v18, (%[wp4])\n"
+ "vle8.v v19, (%[wq4])\n"
+ "vxor.vv v18, v18, v16\n"
+ "vxor.vv v19, v19, v17\n"
+ "vse8.v v18, (%[wp4])\n"
+ "vse8.v v19, (%[wq4])\n"
+
+ "vle8.v v22, (%[wp5])\n"
+ "vle8.v v23, (%[wq5])\n"
+ "vxor.vv v22, v22, v20\n"
+ "vxor.vv v23, v23, v21\n"
+ "vse8.v v22, (%[wp5])\n"
+ "vse8.v v23, (%[wq5])\n"
+
+ "vle8.v v26, (%[wp6])\n"
+ "vle8.v v27, (%[wq6])\n"
+ "vxor.vv v26, v26, v24\n"
+ "vxor.vv v27, v27, v25\n"
+ "vse8.v v26, (%[wp6])\n"
+ "vse8.v v27, (%[wq6])\n"
+
+ "vle8.v v30, (%[wp7])\n"
+ "vle8.v v31, (%[wq7])\n"
+ "vxor.vv v30, v30, v28\n"
+ "vxor.vv v31, v31, v29\n"
+ "vse8.v v30, (%[wp7])\n"
+ "vse8.v v31, (%[wq7])\n"
+ ".option pop\n"
+ : :
+ [wp0]"r"(&p[d + NSIZE * 0]),
+ [wq0]"r"(&q[d + NSIZE * 0]),
+ [wp1]"r"(&p[d + NSIZE * 1]),
+ [wq1]"r"(&q[d + NSIZE * 1]),
+ [wp2]"r"(&p[d + NSIZE * 2]),
+ [wq2]"r"(&q[d + NSIZE * 2]),
+ [wp3]"r"(&p[d + NSIZE * 3]),
+ [wq3]"r"(&q[d + NSIZE * 3]),
+ [wp4]"r"(&p[d + NSIZE * 4]),
+ [wq4]"r"(&q[d + NSIZE * 4]),
+ [wp5]"r"(&p[d + NSIZE * 5]),
+ [wq5]"r"(&q[d + NSIZE * 5]),
+ [wp6]"r"(&p[d + NSIZE * 6]),
+ [wq6]"r"(&q[d + NSIZE * 6]),
+ [wp7]"r"(&p[d + NSIZE * 7]),
+ [wq7]"r"(&q[d + NSIZE * 7])
+ );
+ }
+}
+
+RAID6_RVV_WRAPPER(1);
+RAID6_RVV_WRAPPER(2);
+RAID6_RVV_WRAPPER(4);
+RAID6_RVV_WRAPPER(8);
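
Every unroll factor above repeats the same inner step: advance the running Q syndrome by a GF(2^8) multiply-by-two (the vsra/vsll/vand/vxor group, reducing by 0x1d whenever the top bit spills out) and fold the next data block into both P and Q. A scalar sketch of that step, one byte at a time, mirroring the MASK()/SHLBYTE() pseudocode in the comments (illustration only):

/* GF(2^8) multiply by 2 with the 0x11d polynomial (illustration only). */
static inline u8 gf256_mul2(u8 x)
{
	u8 mask = (u8)((s8)x >> 7);	/* 0xff if the top bit is set */

	return (u8)(x << 1) ^ (mask & 0x1d);
}

/* One syndrome step: wq = 2*wq ^ wd, wp = wp ^ wd. */
static inline void syndrome_step(u8 *wp, u8 *wq, u8 wd)
{
	*wq = gf256_mul2(*wq) ^ wd;
	*wp ^= wd;
}
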
diff --git a/lib/raid6/rvv.h b/lib/raid6/rvv.h
new file mode 100644
index 000000000000..94044a1b707b
--- /dev/null
+++ b/lib/raid6/rvv.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2024 Institute of Software, CAS.
+ *
+ * raid6/rvv.h
+ *
+ * Definitions for RISC-V RAID-6 code
+ */
+
+#define RAID6_RVV_WRAPPER(_n) \
+ static void raid6_rvv ## _n ## _gen_syndrome(int disks, \
+ size_t bytes, void **ptrs) \
+ { \
+ void raid6_rvv ## _n ## _gen_syndrome_real(int d, \
+ unsigned long b, void **p); \
+ kernel_vector_begin(); \
+ raid6_rvv ## _n ## _gen_syndrome_real(disks, \
+ (unsigned long)bytes, ptrs); \
+ kernel_vector_end(); \
+ } \
+ static void raid6_rvv ## _n ## _xor_syndrome(int disks, \
+ int start, int stop, \
+ size_t bytes, void **ptrs) \
+ { \
+ void raid6_rvv ## _n ## _xor_syndrome_real(int d, \
+ int s1, int s2, \
+ unsigned long b, void **p); \
+ kernel_vector_begin(); \
+ raid6_rvv ## _n ## _xor_syndrome_real(disks, \
+ start, stop, (unsigned long)bytes, ptrs); \
+ kernel_vector_end(); \
+ } \
+ struct raid6_calls const raid6_rvvx ## _n = { \
+ raid6_rvv ## _n ## _gen_syndrome, \
+ raid6_rvv ## _n ## _xor_syndrome, \
+ rvv_has_vector, \
+ "rvvx" #_n, \
+ 0 \
+ }
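
RAID6_RVV_WRAPPER(_n) supplies the kernel_vector_begin()/kernel_vector_end() bracketing around the _real routines and emits the raid6_calls descriptor that algos.c registers. Roughly, RAID6_RVV_WRAPPER(1) expands to the following (illustrative expansion, with the inner prototypes and formatting simplified):

static void raid6_rvv1_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	kernel_vector_begin();
	raid6_rvv1_gen_syndrome_real(disks, (unsigned long)bytes, ptrs);
	kernel_vector_end();
}

static void raid6_rvv1_xor_syndrome(int disks, int start, int stop,
				    size_t bytes, void **ptrs)
{
	kernel_vector_begin();
	raid6_rvv1_xor_syndrome_real(disks, start, stop,
				     (unsigned long)bytes, ptrs);
	kernel_vector_end();
}

struct raid6_calls const raid6_rvvx1 = {
	raid6_rvv1_gen_syndrome,	/* .gen_syndrome */
	raid6_rvv1_xor_syndrome,	/* .xor_syndrome */
	rvv_has_vector,			/* .valid */
	"rvvx1",			/* .name */
	0				/* .priority */
};
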
diff --git a/lib/string.c b/lib/string.c
index eb4486ed40d2..b632c71df1a5 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -119,6 +119,7 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count)
if (count == 0 || WARN_ON_ONCE(count > INT_MAX))
return -E2BIG;
+#ifndef CONFIG_DCACHE_WORD_ACCESS
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/*
* If src is unaligned, don't cross a page boundary,
@@ -134,11 +135,13 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count)
if (((long) dest | (long) src) & (sizeof(long) - 1))
max = 0;
#endif
+#endif
/*
- * read_word_at_a_time() below may read uninitialized bytes after the
- * trailing zero and use them in comparisons. Disable this optimization
- * under KMSAN to prevent false positive reports.
+ * load_unaligned_zeropad() or read_word_at_a_time() below may read
+ * uninitialized bytes after the trailing zero and use them in
+ * comparisons. Disable this optimization under KMSAN to prevent
+ * false positive reports.
*/
if (IS_ENABLED(CONFIG_KMSAN))
max = 0;
@@ -146,7 +149,11 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count)
while (max >= sizeof(unsigned long)) {
unsigned long c, data;
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+ c = load_unaligned_zeropad(src+res);
+#else
c = read_word_at_a_time(src+res);
+#endif
if (has_zero(c, &data, &constants)) {
data = prep_zero_mask(c, data, &constants);
data = create_zero_mask(data);
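
The string.c hunk lets sized_strscpy() use load_unaligned_zeropad() when CONFIG_DCACHE_WORD_ACCESS is available, so the word-at-a-time loop can read past the end of the source without the page-boundary precautions required by read_word_at_a_time(). The zero-byte detection it feeds (has_zero() and friends) is the classic word trick; a self-contained sketch for a little-endian 64-bit word (illustration only; the in-kernel <asm/word-at-a-time.h> versions are per-architecture):

#include <stdint.h>

/*
 * Does this 64-bit little-endian word contain a zero byte?  (w - ones)
 * borrows through any zero byte and sets its high bit; masking with ~w
 * drops bytes whose high bit was already set before the subtraction.
 */
static inline int word_has_zero_byte(uint64_t w)
{
	const uint64_t ones  = 0x0101010101010101ULL;
	const uint64_t highs = 0x8080808080808080ULL;

	return ((w - ones) & ~w & highs) != 0;
}
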
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 8772e5edaa4f..a4b6f52b9c57 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -77,18 +77,22 @@ static void test_ubsan_shift_out_of_bounds(void)
static void test_ubsan_out_of_bounds(void)
{
- volatile int i = 4, j = 5, k = -1;
- volatile char above[4] = { }; /* Protect surrounding memory. */
- volatile int arr[4];
- volatile char below[4] = { }; /* Protect surrounding memory. */
+ int i = 4, j = 4, k = -1;
+ volatile struct {
+ char above[4]; /* Protect surrounding memory. */
+ int arr[4];
+ char below[4]; /* Protect surrounding memory. */
+ } data;
- above[0] = below[0];
+ OPTIMIZER_HIDE_VAR(i);
+ OPTIMIZER_HIDE_VAR(j);
+ OPTIMIZER_HIDE_VAR(k);
UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "above");
- arr[j] = i;
+ data.arr[j] = i;
UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "below");
- arr[k] = i;
+ data.arr[k] = i;
}
enum ubsan_test_enum {
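
The rewritten test keeps the compiler from seeing through the out-of-bounds indices with OPTIMIZER_HIDE_VAR(), and moves the guard bytes and the array into one volatile struct so the deliberate overruns land in memory the test itself owns. As a rough sketch, OPTIMIZER_HIDE_VAR(var) amounts to an empty asm that pretends to rewrite the variable (the real macro lives in <linux/compiler.h>; the name below is made up):

/*
 * Rough equivalent of OPTIMIZER_HIDE_VAR() (illustration only): the empty
 * asm claims to produce a new value for var, so the compiler can no longer
 * prove what the index is and cannot optimize the bad access away.
 */
#define HIDE_VAR(var)	__asm__ volatile("" : "+r"(var))
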
diff --git a/lib/tests/slub_kunit.c b/lib/tests/slub_kunit.c
index d47c472b0520..848b682a2d70 100644
--- a/lib/tests/slub_kunit.c
+++ b/lib/tests/slub_kunit.c
@@ -325,4 +325,5 @@ static struct kunit_suite test_suite = {
};
kunit_test_suite(test_suite);
+MODULE_DESCRIPTION("Kunit tests for slub allocator");
MODULE_LICENSE("GPL");
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
index 9308bcfb2ad5..dfb4f2358cab 100644
--- a/lib/ucs2_string.c
+++ b/lib/ucs2_string.c
@@ -165,4 +165,5 @@ ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
}
EXPORT_SYMBOL(ucs2_as_utf8);
+MODULE_DESCRIPTION("UCS2 string handling");
MODULE_LICENSE("GPL v2");
diff --git a/lib/zlib_inflate/inflate_syms.c b/lib/zlib_inflate/inflate_syms.c
index 9720114c0672..b8996d90e8bc 100644
--- a/lib/zlib_inflate/inflate_syms.c
+++ b/lib/zlib_inflate/inflate_syms.c
@@ -18,4 +18,5 @@ EXPORT_SYMBOL(zlib_inflateEnd);
EXPORT_SYMBOL(zlib_inflateReset);
EXPORT_SYMBOL(zlib_inflateIncomp);
EXPORT_SYMBOL(zlib_inflate_blob);
+MODULE_DESCRIPTION("Data decompression using the deflation algorithm");
MODULE_LICENSE("GPL");