1 /*
2 * Copyright (c) 2018 Nordic Semiconductor ASA
3 * Copyright (c) 2017 Exati Tecnologia Ltda.
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #include <zephyr/drivers/entropy.h>
9 #include <zephyr/kernel.h>
10 #include <zephyr/sys/atomic.h>
11 #include <zephyr/sys/util.h>
12 #include <soc.h>
13 #include <hal/nrf_rng.h>
14 #include <zephyr/irq.h>
15
16 #define DT_DRV_COMPAT nordic_nrf_rng
17
18 #define IRQN DT_INST_IRQN(0)
19 #define IRQ_PRIO DT_INST_IRQ(0, priority)
20
21 /*
22 * The nRF5 RNG HW has several characteristics that need to be taken
23 * into account by the driver to achieve energy efficient generation
24 * of entropy.
25 *
26 * The RNG does not support continuously DMA'ing entropy into RAM,
27 * values must be read out by the CPU byte-by-byte. But once started,
28 * it will continue to generate bytes until stopped.
29 *
30 * The generation time for byte 0 after starting generation (with BIAS
31 * correction) is:
32 *
33 * nRF51822 - 677us
34 * nRF52810 - 248us
35 * nRF52840 - 248us
36 *
37 * The generation time for byte N >= 1 after starting generation (with
38 * BIAS correction) is:
39 *
40 * nRF51822 - 677us
41 * nRF52810 - 120us
42 * nRF52840 - 120us
43 *
44 * Due to the first byte in a stream of bytes being more costly on
45 * some platforms a "water system" inspired algorithm is used to
46 * amortize the cost of the first byte.
47 *
48 * The algorithm will delay generation of entropy until the amount of
49 * bytes goes below THRESHOLD, at which point it will generate entropy
50 * until the BUF_LEN limit is reached.
51 *
52 * The entropy level is checked at the end of every consumption of
53 * entropy.
54 *
 * The algorithm and HW together have these characteristics:
56 *
57 * Setting a low threshold will highly amortize the extra 120us cost
58 * of the first byte on nRF52.
59 *
60 * Setting a high threshold will minimize the time spent waiting for
61 * entropy.
62 *
63 * To minimize power consumption the threshold should either be set
64 * low or high depending on the HFCLK-usage pattern of other
65 * components.
66 *
67 * If the threshold is set close to the BUF_LEN, and the system
68 * happens to anyway be using the HFCLK for several hundred us after
69 * entropy is requested there will be no extra current-consumption for
70 * keeping clocks running for entropy generation.
71 *
72 */
73
/*
 * Lock-less ring buffer of entropy bytes.
 *
 * The RNG ISR produces bytes by advancing 'last'; consumers reserve a
 * region by advancing 'first_alloc' and mark it consumed by advancing
 * 'first_read' (see rng_pool_get()). The buffer length is a power of
 * two, so indices wrap using 'mask'.
 */
struct rng_pool {
	uint8_t first_alloc; /* Index of the first byte reserved by a reader. */
	uint8_t first_read;  /* Index of the first byte not yet fully read. */
	uint8_t last;        /* Index one past the newest produced byte. */
	uint8_t mask;        /* Buffer length - 1 (length is a power of two). */
	uint8_t threshold;   /* Restart generation at/below this fill level. */
	FLEXIBLE_ARRAY_DECLARE(uint8_t, buffer); /* Entropy byte storage. */
};

/* Raw backing storage for a struct rng_pool with a 'len'-byte buffer;
 * accessed via a cast to (struct rng_pool *).
 */
#define RNG_POOL_DEFINE(name, len) uint8_t name[sizeof(struct rng_pool) + (len)]

BUILD_ASSERT((CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE &
	      (CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE - 1)) == 0,
	     "The CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE must be a power of 2!");

BUILD_ASSERT((CONFIG_ENTROPY_NRF5_THR_POOL_SIZE &
	      (CONFIG_ENTROPY_NRF5_THR_POOL_SIZE - 1)) == 0,
	     "The CONFIG_ENTROPY_NRF5_THR_POOL_SIZE must be a power of 2!");

/* Driver state: one pool served to ISR-context callers, one to threads. */
struct entropy_nrf5_dev_data {
	struct k_sem sem_lock; /* Serializes thread-pool consumers. */
	struct k_sem sem_sync; /* Given by the ISR when the ISR pool overflows
				* toward the thread pool; waited on by threads
				* that found the thread pool empty.
				*/

	RNG_POOL_DEFINE(isr, CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE);
	RNG_POOL_DEFINE(thr, CONFIG_ENTROPY_NRF5_THR_POOL_SIZE);
};

static struct entropy_nrf5_dev_data entropy_nrf5_data;
102
random_byte_get(void)103 static int random_byte_get(void)
104 {
105 int retval = -EAGAIN;
106 unsigned int key;
107
108 key = irq_lock();
109
110 if (nrf_rng_event_check(NRF_RNG, NRF_RNG_EVENT_VALRDY)) {
111 retval = nrf_rng_random_value_get(NRF_RNG);
112 nrf_rng_event_clear(NRF_RNG, NRF_RNG_EVENT_VALRDY);
113 }
114
115 irq_unlock(key);
116
117 return retval;
118 }
119
/*
 * Copy up to @p len entropy bytes from @p rngp into @p buf.
 *
 * Preemption-safe against a higher-priority caller using the same pool:
 * the region to read is first reserved (first_alloc advanced) under
 * irq_lock, then copied with interrupts enabled, and first_read is only
 * advanced by the outermost (non-nested) reader.
 *
 * Returns the number of bytes actually copied; may be less than @p len,
 * or zero when the pool is empty.
 */
static uint16_t rng_pool_get(struct rng_pool *rngp, uint8_t *buf, uint16_t len)
{
	uint32_t last = rngp->last;
	uint32_t mask = rngp->mask;
	uint8_t *dst = buf;
	uint32_t first, available;
	uint32_t other_read_in_progress;
	unsigned int key;

	key = irq_lock();
	first = rngp->first_alloc;

	/*
	 * The other_read_in_progress is non-zero if rngp->first_read != first,
	 * which means that lower-priority code (which was interrupted by this
	 * call) already allocated an area for reading.
	 */
	other_read_in_progress = (rngp->first_read ^ first);

	available = (last - first) & mask;
	if (available < len) {
		len = available;
	}

	/*
	 * Move the alloc index forward to signal that this part of the
	 * buffer is now reserved for this call.
	 */
	rngp->first_alloc = (first + len) & mask;
	irq_unlock(key);

	/* Copy the reserved region; no lock needed, it belongs to us now. */
	while (likely(len--)) {
		*dst++ = rngp->buffer[first];
		first = (first + 1) & mask;
	}

	/*
	 * If this call is the last one accessing the pool, move the read
	 * index to signal that all allocated regions are now read and can
	 * be overwritten by the producer.
	 */
	if (likely(!other_read_in_progress)) {
		key = irq_lock();
		rngp->first_read = rngp->first_alloc;
		irq_unlock(key);
	}

	len = dst - buf;
	available = available - len;
	/* Kick the RNG back on when the pool drops to/below its threshold. */
	if (available <= rngp->threshold) {
		nrf_rng_task_trigger(NRF_RNG, NRF_RNG_TASK_START);
	}

	return len;
}
175
/* Append one byte to the pool.
 *
 * Returns 0 on success, -ENOBUFS when the ring is full. Called from the
 * RNG ISR only, so the producer index needs no locking here.
 */
static int rng_pool_put(struct rng_pool *rngp, uint8_t byte)
{
	uint8_t wrap = rngp->mask;
	uint8_t used = (rngp->last - rngp->first_read) & wrap;

	/* One slot is kept free to distinguish full from empty. */
	if (used == wrap) {
		return -ENOBUFS;
	}

	rngp->buffer[rngp->last] = byte;
	rngp->last = (rngp->last + 1) & wrap;

	return 0;
}
192
/* Reset all pool indices and configure the size mask and refill threshold.
 * @p size must be a power of two (enforced by BUILD_ASSERTs above).
 */
static void rng_pool_init(struct rng_pool *rngp, uint16_t size, uint8_t threshold)
{
	rngp->threshold = threshold;
	rngp->mask = size - 1;
	rngp->last = 0U;
	rngp->first_read = 0U;
	rngp->first_alloc = 0U;
}
201
isr(const void * arg)202 static void isr(const void *arg)
203 {
204 int byte, ret;
205
206 ARG_UNUSED(arg);
207
208 byte = random_byte_get();
209 if (byte < 0) {
210 return;
211 }
212
213 ret = rng_pool_put((struct rng_pool *)(entropy_nrf5_data.isr), byte);
214 if (ret < 0) {
215 ret = rng_pool_put((struct rng_pool *)(entropy_nrf5_data.thr),
216 byte);
217 if (ret < 0) {
218 nrf_rng_task_trigger(NRF_RNG, NRF_RNG_TASK_STOP);
219 }
220
221 k_sem_give(&entropy_nrf5_data.sem_sync);
222 }
223 }
224
/* entropy API: fill @p buf with @p len entropy bytes, sleeping as needed.
 *
 * Consumes from the thread pool under sem_lock; when the pool is empty,
 * blocks on sem_sync until the ISR signals that more bytes are on the
 * way. Always returns 0 (only returns once the buffer is full).
 */
static int entropy_nrf5_get_entropy(const struct device *dev, uint8_t *buf,
				    uint16_t len)
{
	uint8_t *dst = buf;
	uint16_t remaining = len;

	/* Check if this API is called on the correct driver instance. */
	__ASSERT_NO_MSG(&entropy_nrf5_data == dev->data);

	while (remaining > 0U) {
		uint16_t got;

		k_sem_take(&entropy_nrf5_data.sem_lock, K_FOREVER);
		got = rng_pool_get((struct rng_pool *)(entropy_nrf5_data.thr),
				   dst, remaining);
		k_sem_give(&entropy_nrf5_data.sem_lock);

		if (got == 0U) {
			/* Pool is empty: sleep until the next interrupt. */
			k_sem_take(&entropy_nrf5_data.sem_sync, K_FOREVER);
		} else {
			dst += got;
			remaining -= got;
		}
	}

	return 0;
}
251
/* entropy API: fetch entropy without sleeping (callable from ISRs).
 *
 * Without ENTROPY_BUSYWAIT: returns whatever the ISR pool currently
 * holds — possibly fewer than @p len bytes, possibly zero.
 *
 * With ENTROPY_BUSYWAIT: masks the RNG interrupt, busy-waits on the
 * hardware until exactly @p len bytes are produced, then returns len.
 */
static int entropy_nrf5_get_entropy_isr(const struct device *dev,
					uint8_t *buf, uint16_t len,
					uint32_t flags)
{
	uint16_t cnt = len;

	/* Check if this API is called on correct driver instance. */
	__ASSERT_NO_MSG(&entropy_nrf5_data == dev->data);

	if (likely((flags & ENTROPY_BUSYWAIT) == 0U)) {
		/* Non-blocking path: drain what the ISR pool has. */
		return rng_pool_get((struct rng_pool *)(entropy_nrf5_data.isr),
				    buf, len);
	}

	if (len) {
		unsigned int key;
		int irq_enabled;

		/* Mask the RNG IRQ so this loop — not the ISR — consumes
		 * the VALRDY events.
		 */
		key = irq_lock();
		irq_enabled = irq_is_enabled(IRQN);
		irq_disable(IRQN);
		irq_unlock(key);

		nrf_rng_event_clear(NRF_RNG, NRF_RNG_EVENT_VALRDY);
		nrf_rng_task_trigger(NRF_RNG, NRF_RNG_TASK_START);

		/* Clear NVIC pending bit. This ensures that a subsequent
		 * RNG event will set the Cortex-M single-bit event register
		 * to 1 (the bit is set when NVIC pending IRQ status is
		 * changed from 0 to 1)
		 */
		NVIC_ClearPendingIRQ(IRQN);

		do {
			int byte;

			/* Idle the CPU until the next VALRDY event fires. */
			while (!nrf_rng_event_check(NRF_RNG,
						    NRF_RNG_EVENT_VALRDY)) {
				k_cpu_atomic_idle(irq_lock());
			}

			byte = random_byte_get();
			NVIC_ClearPendingIRQ(IRQN);

			if (byte < 0) {
				/* Raced with event clear; wait again. */
				continue;
			}

			/* Fill the buffer back-to-front. */
			buf[--len] = byte;
		} while (len);

		/* Restore the RNG IRQ if it was enabled on entry. */
		if (irq_enabled) {
			irq_enable(IRQN);
		}
	}

	return cnt;
}
310
/* Forward declaration: the device definition below references init. */
static int entropy_nrf5_init(const struct device *dev);

/* Entropy driver API vtable. */
static DEVICE_API(entropy, entropy_nrf5_api_funcs) = {
	.get_entropy = entropy_nrf5_get_entropy,
	.get_entropy_isr = entropy_nrf5_get_entropy_isr
};

/* Single RNG device instance, initialized at PRE_KERNEL_1. */
DEVICE_DT_INST_DEFINE(0,
		      entropy_nrf5_init, NULL,
		      &entropy_nrf5_data, NULL,
		      PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY,
		      &entropy_nrf5_api_funcs);
323
/* Driver init: set up semaphores and pools, configure bias correction,
 * start generation, then hook up and enable the RNG interrupt.
 * Always returns 0.
 */
static int entropy_nrf5_init(const struct device *dev)
{
	/* Check if this API is called on correct driver instance. */
	__ASSERT_NO_MSG(&entropy_nrf5_data == dev->data);

	/* Locking semaphore initialized to 1 (unlocked) */
	k_sem_init(&entropy_nrf5_data.sem_lock, 1, 1);

	/* Synching semaphore */
	k_sem_init(&entropy_nrf5_data.sem_sync, 0, 1);

	rng_pool_init((struct rng_pool *)(entropy_nrf5_data.thr),
		      CONFIG_ENTROPY_NRF5_THR_POOL_SIZE,
		      CONFIG_ENTROPY_NRF5_THR_THRESHOLD);
	rng_pool_init((struct rng_pool *)(entropy_nrf5_data.isr),
		      CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE,
		      CONFIG_ENTROPY_NRF5_ISR_THRESHOLD);

	/* Enable or disable bias correction */
	if (IS_ENABLED(CONFIG_ENTROPY_NRF5_BIAS_CORRECTION)) {
		nrf_rng_error_correction_enable(NRF_RNG);
	} else {
		nrf_rng_error_correction_disable(NRF_RNG);
	}

	nrf_rng_event_clear(NRF_RNG, NRF_RNG_EVENT_VALRDY);
	nrf_rng_int_enable(NRF_RNG, NRF_RNG_INT_VALRDY_MASK);
	/* Start filling the pools right away at boot. */
	nrf_rng_task_trigger(NRF_RNG, NRF_RNG_TASK_START);

	IRQ_CONNECT(IRQN, IRQ_PRIO, isr, &entropy_nrf5_data, 0);
	irq_enable(IRQN);

	return 0;
}
358