1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
4  */
5 #ifndef _ASM_LOONGARCH_XOR_H
6 #define _ASM_LOONGARCH_XOR_H
7 
8 #include <asm/cpu-features.h>
9 #include <asm/xor_simd.h>
10 
#ifdef CONFIG_CPU_HAS_LSX
/*
 * XOR template backed by the LSX SIMD routines declared in
 * <asm/xor_simd.h>. The .do_N hooks XOR N source blocks together;
 * "lsx" is the name reported during boot-time benchmarking.
 * (LSX is presumably the 128-bit vector extension — confirm against
 * the LoongArch ISA docs.)
 */
static struct xor_block_template xor_block_lsx = {
	.name = "lsx",
	.do_2 = xor_lsx_2,
	.do_3 = xor_lsx_3,
	.do_4 = xor_lsx_4,
	.do_5 = xor_lsx_5,
};

/*
 * Benchmark the LSX template only when the running CPU actually has
 * LSX (kernel support alone is not enough). Expands to nothing when
 * CONFIG_CPU_HAS_LSX is not set, so XOR_TRY_TEMPLATES can invoke it
 * unconditionally.
 */
#define XOR_SPEED_LSX()					\
	do {						\
		if (cpu_has_lsx)			\
			xor_speed(&xor_block_lsx);	\
	} while (0)
#else /* CONFIG_CPU_HAS_LSX */
#define XOR_SPEED_LSX()
#endif /* CONFIG_CPU_HAS_LSX */
28 
#ifdef CONFIG_CPU_HAS_LASX
/*
 * XOR template backed by the LASX SIMD routines declared in
 * <asm/xor_simd.h>; same structure as the LSX template above.
 * (LASX is presumably the wider 256-bit vector extension — confirm
 * against the LoongArch ISA docs.)
 */
static struct xor_block_template xor_block_lasx = {
	.name = "lasx",
	.do_2 = xor_lasx_2,
	.do_3 = xor_lasx_3,
	.do_4 = xor_lasx_4,
	.do_5 = xor_lasx_5,
};

/*
 * Benchmark the LASX template only when the running CPU has LASX;
 * a no-op stub is provided when CONFIG_CPU_HAS_LASX is not set.
 */
#define XOR_SPEED_LASX()					\
	do {							\
		if (cpu_has_lasx)				\
			xor_speed(&xor_block_lasx);		\
	} while (0)
#else /* CONFIG_CPU_HAS_LASX */
#define XOR_SPEED_LASX()
#endif /* CONFIG_CPU_HAS_LASX */
46 
47 /*
48  * For grins, also test the generic routines.
49  *
50  * More importantly: it cannot be ruled out at this point of time, that some
51  * future (maybe reduced) models could run the vector algorithms slower than
52  * the scalar ones, maybe for errata or micro-op reasons. It may be
53  * appropriate to revisit this after one or two more uarch generations.
54  */
55 #include <asm-generic/xor.h>
56 
/*
 * Override the generic XOR_TRY_TEMPLATES (defined by <asm-generic/xor.h>
 * included above, hence the #undef): benchmark the four generic scalar
 * templates first, then the vector ones. The XOR_SPEED_* macros expand
 * to nothing when the corresponding CONFIG option is off, and check the
 * CPU feature at runtime otherwise, so this works on all configurations.
 */
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES				\
do {							\
	xor_speed(&xor_block_8regs);			\
	xor_speed(&xor_block_8regs_p);			\
	xor_speed(&xor_block_32regs);			\
	xor_speed(&xor_block_32regs_p);			\
	XOR_SPEED_LSX();				\
	XOR_SPEED_LASX();				\
} while (0)
67 
68 #endif /* _ASM_LOONGARCH_XOR_H */
69