/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARCHRANDOM_H
#define _ASM_ARCHRANDOM_H

#include <linux/arm-smccc.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/cpufeature.h>

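/*
 * SMCCC TRNG versions are encoded as (major << 16) | minor, so 0x10000
 * below corresponds to version 1.0 of the firmware TRNG interface.
 */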
#define ARM_SMCCC_TRNG_MIN_VERSION	0x10000UL

extern bool smccc_trng_available;

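/*
 * Probe for the firmware TRNG service: a negative TRNG_VERSION return
 * means the call is not implemented, otherwise a0 carries the version,
 * which is compared against the minimum version we support.
 */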
static inline bool __init smccc_probe_trng(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_VERSION, &res);
	if ((s32)res.a0 < 0)
		return false;

	return res.a0 >= ARM_SMCCC_TRNG_MIN_VERSION;
}

static inline bool __arm64_rndr(unsigned long *v)
{
	bool ok;

	/*
	 * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
	 * and set PSTATE.NZCV to 0b0100 otherwise.
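	 * The "cset %w1, ne" below then sets "ok" only when the Z flag
	 * is clear, i.e. when a random value was actually returned in *v.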
	 */
	asm volatile(
		__mrs_s("%0", SYS_RNDR_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}

static inline bool __arm64_rndrrs(unsigned long *v)
{
	bool ok;

	/*
	 * Reads of RNDRRS set PSTATE.NZCV to 0b0000 on success,
	 * and set PSTATE.NZCV to 0b0100 otherwise.
	 */
	asm volatile(
		__mrs_s("%0", SYS_RNDRRS_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}

static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
	/*
	 * Only support the generic interface after we have detected
	 * the system wide capability, avoiding complexity with the
	 * cpufeature code and with potential scheduling between CPUs
	 * with and without the feature.
	 */
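	/* RNDR yields 64 bits per read, so at most one long is returned. */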
	if (max_longs && cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
		return 1;
	return 0;
}

static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
	if (!max_longs)
		return 0;

	/*
	 * We prefer the SMCCC call, since its semantics (return actual
	 * hardware backed entropy) is closer to the idea behind this
	 * function here than what even the RNDRRS register provides
	 * (the output of a pseudo RNG freshly seeded by a TRNG).
	 */
	if (smccc_trng_available) {
		struct arm_smccc_res res;

		max_longs = min_t(size_t, 3, max_longs);
		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
		if ((int)res.a0 >= 0) {
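			/*
			 * The requested entropy is returned right-justified
			 * across a1..a3 (a3 holds bits [63:0]), so copying
			 * starts at a1 only when three longs were asked for.
			 */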
			switch (max_longs) {
			case 3:
				*v++ = res.a1;
				fallthrough;
			case 2:
				*v++ = res.a2;
				fallthrough;
			case 1:
				*v++ = res.a3;
				break;
			}
			return max_longs;
		}
	}

	/*
	 * RNDRRS is not backed by an entropy source but by a DRBG that is
	 * reseeded after each invocation. This is not a 100% fit but good
	 * enough to implement this API if no other entropy source exists.
	 */
	if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndrrs(v))
		return 1;

	return 0;
}

static inline bool __init __early_cpu_has_rndr(void)
{
	/* Open code as we run prior to the first call to cpufeature. */
	unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
	return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
}

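/*
 * Early boot variant of arch_get_random_seed_longs(): it open codes the
 * RNDR feature check because the cpufeature framework is not yet up,
 * hence the WARN_ON() when called outside of SYSTEM_BOOTING.
 */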
static inline size_t __init __must_check
arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
{
	WARN_ON(system_state != SYSTEM_BOOTING);

	if (!max_longs)
		return 0;

	if (smccc_trng_available) {
		struct arm_smccc_res res;

		max_longs = min_t(size_t, 3, max_longs);
		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
		if ((int)res.a0 >= 0) {
			switch (max_longs) {
			case 3:
				*v++ = res.a1;
				fallthrough;
			case 2:
				*v++ = res.a2;
				fallthrough;
			case 1:
				*v++ = res.a3;
				break;
			}
			return max_longs;
		}
	}

	if (__early_cpu_has_rndr() && __arm64_rndr(v))
		return 1;

	return 0;
}
#define arch_get_random_seed_longs_early arch_get_random_seed_longs_early

#endif /* _ASM_ARCHRANDOM_H */