/*
 * Copyright (c) 2017 Erwin Rol <erwin@erwinrol.com>
 * Copyright (c) 2018 Nordic Semiconductor ASA
 * Copyright (c) 2017 Exati Tecnologia Ltda.
 * Copyright (c) 2020 STMicroelectronics.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/entropy.h>
#include <zephyr/random/random.h>
#include <zephyr/init.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
#include <errno.h>
#include <soc.h>
#include <zephyr/pm/policy.h>
#include <stm32_ll_bus.h>
#include <stm32_ll_rcc.h>
#include <stm32_ll_rng.h>
#include <stm32_ll_pka.h>
#include <stm32_ll_system.h>
#include <zephyr/sys/printk.h>
#include <zephyr/pm/device.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include <zephyr/irq.h>
#include <zephyr/sys/barrier.h>
#include "stm32_hsem.h"

#include "entropy_stm32.h"

#if defined(RNG_CR_CONDRST)
#define STM32_CONDRST_SUPPORT
#endif

/*
 * This driver needs to accommodate all STM32 families:
 * - simple RNG without hardware FIFO and without DMA
 * - variable delay between two consecutive random numbers
 *   (depending on family and clock settings)
 * - IRQ-less TRNG instances
 *
 * Because the first byte in a stream of bytes is more costly to produce
 * on some platforms, a "water system" inspired algorithm is used to
 * amortize the cost of that first byte.
 *
 * The algorithm delays entropy generation until the number of buffered
 * bytes drops below THRESHOLD, at which point it generates entropy
 * until the BUF_LEN limit is reached.
 *
 * The entropy level is checked at the end of every entropy consumption.
 *
 * For TRNG instances with no IRQ line, a delayable work item is
 * scheduled on the system work queue and used to "simulate"
 * device-generated interrupts - this is done to reduce polling to a
 * minimum.
 */
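
/*
 * Illustrative sketch of the "water system" refill policy described
 * above (simplified pseudo-C, not part of the driver; names are
 * hypothetical):
 *
 *	void consume(struct pool *p, uint8_t *buf, uint16_t len)
 *	{
 *		copy_out(p, buf, len);
 *		if (level(p) <= THRESHOLD) {
 *			start_filling();	// keep going until the pool is full again
 *		}
 *	}
 *
 *	void fill(struct pool *p)
 *	{
 *		while (put_byte(p, read_trng()) == 0) {
 *			;			// stops once BUF_LEN is reached
 *		}
 *	}
 */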

struct rng_pool {
	uint8_t first_alloc;
	uint8_t first_read;
	uint8_t last;
	uint8_t mask;
	uint8_t threshold;
	FLEXIBLE_ARRAY_DECLARE(uint8_t, buffer);
};

#define RNG_POOL_DEFINE(name, len) uint8_t name[sizeof(struct rng_pool) + (len)]

BUILD_ASSERT((CONFIG_ENTROPY_STM32_ISR_POOL_SIZE &
	      (CONFIG_ENTROPY_STM32_ISR_POOL_SIZE - 1)) == 0,
	     "The CONFIG_ENTROPY_STM32_ISR_POOL_SIZE must be a power of 2!");

BUILD_ASSERT((CONFIG_ENTROPY_STM32_THR_POOL_SIZE &
	      (CONFIG_ENTROPY_STM32_THR_POOL_SIZE - 1)) == 0,
	     "The CONFIG_ENTROPY_STM32_THR_POOL_SIZE must be a power of 2!");
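
/*
 * A pool is declared as a flat byte array large enough for the
 * bookkeeping header plus the ring buffer, and is then accessed through
 * a struct rng_pool pointer cast. The power-of-2 size requirement above
 * is what allows index wrapping with a simple bitwise AND. For example
 * (mirroring how this driver uses it):
 *
 *	RNG_POOL_DEFINE(thr, CONFIG_ENTROPY_STM32_THR_POOL_SIZE);
 *	...
 *	struct rng_pool *p = (struct rng_pool *)thr;
 *	next = (index + 1) & p->mask;	// wraps because the size is 2^n
 */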

/**
 * RM0505 §14.4 "TRNG functional description":
 * To use the TRNG peripheral, the system clock frequency must be
 * at least 32 MHz. See also: §6.2.2 "Peripheral clock details".
 */
BUILD_ASSERT(!IS_ENABLED(CONFIG_SOC_STM32WB09XX) ||
	     CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC >= (32 * 1000 * 1000),
	     "STM32WB09: TRNG requires system clock frequency >= 32MHz");

struct entropy_stm32_rng_dev_cfg {
	struct stm32_pclken *pclken;
};

struct entropy_stm32_rng_dev_data {
	RNG_TypeDef *rng;
	const struct device *clock;
	struct k_sem sem_lock;
	struct k_sem sem_sync;
	struct k_work filling_work;
#if IRQLESS_TRNG
	/* Work item that polls the TRNG to refill the pools */
	struct k_work_delayable trng_poll_work;
#endif /* IRQLESS_TRNG */
	bool filling_pools;

	RNG_POOL_DEFINE(isr, CONFIG_ENTROPY_STM32_ISR_POOL_SIZE);
	RNG_POOL_DEFINE(thr, CONFIG_ENTROPY_STM32_THR_POOL_SIZE);
};

static struct stm32_pclken pclken_rng[] = STM32_DT_INST_CLOCKS(0);

static struct entropy_stm32_rng_dev_cfg entropy_stm32_rng_config = {
	.pclken = pclken_rng
};

static struct entropy_stm32_rng_dev_data entropy_stm32_rng_data = {
	.rng = (RNG_TypeDef *)DT_INST_REG_ADDR(0),
};

static int entropy_stm32_suspend(void)
{
	const struct device *dev = DEVICE_DT_GET(DT_DRV_INST(0));
	struct entropy_stm32_rng_dev_data *dev_data = dev->data;
	const struct entropy_stm32_rng_dev_cfg *dev_cfg = dev->config;
	RNG_TypeDef *rng = dev_data->rng;
	int res;

#if defined(CONFIG_SOC_SERIES_STM32WBX) || defined(CONFIG_STM32H7_DUAL_CORE)
	/* Prevent concurrent access with PM */
	z_stm32_hsem_lock(CFG_HW_RNG_SEMID, HSEM_LOCK_WAIT_FOREVER);
#endif /* CONFIG_SOC_SERIES_STM32WBX || CONFIG_STM32H7_DUAL_CORE */
	LL_RNG_Disable(rng);
#if defined(CONFIG_SOC_STM32WB09XX)
	/* RM0505 Rev.2 §14.4:
	 * "After the TRNG IP is disabled by setting CR.DISABLE, in order to
	 * properly restart the TRNG IP, the AES_RESET bit must be set to 1
	 * (that is, resetting the AES core and restarting all health tests)."
	 */
	LL_RNG_SetAesReset(rng, 1);
#endif /* CONFIG_SOC_STM32WB09XX */

#ifdef CONFIG_SOC_SERIES_STM32WBAX
	uint32_t wait_cycles, rng_rate;

	if (LL_PKA_IsEnabled(PKA)) {
		return 0;
	}

	if (clock_control_get_rate(dev_data->clock,
				   (clock_control_subsys_t) &dev_cfg->pclken[0],
				   &rng_rate) < 0) {
		return -EIO;
	}

	wait_cycles = SystemCoreClock / rng_rate * 2;

	for (int i = wait_cycles; i >= 0; i--) {
	}
#endif /* CONFIG_SOC_SERIES_STM32WBAX */

	res = clock_control_off(dev_data->clock,
				(clock_control_subsys_t)&dev_cfg->pclken[0]);

#if defined(CONFIG_SOC_SERIES_STM32WBX) || defined(CONFIG_STM32H7_DUAL_CORE)
	z_stm32_hsem_unlock(CFG_HW_RNG_SEMID);
#endif /* CONFIG_SOC_SERIES_STM32WBX || CONFIG_STM32H7_DUAL_CORE */

	return res;
}

static int entropy_stm32_resume(void)
{
	const struct device *dev = DEVICE_DT_GET(DT_DRV_INST(0));
	struct entropy_stm32_rng_dev_data *dev_data = dev->data;
	const struct entropy_stm32_rng_dev_cfg *dev_cfg = dev->config;
	RNG_TypeDef *rng = dev_data->rng;
	int res;

	res = clock_control_on(dev_data->clock,
			       (clock_control_subsys_t)&dev_cfg->pclken[0]);
	LL_RNG_Enable(rng);
	ll_rng_enable_it(rng);

	return res;
}

static void configure_rng(void)
{
	RNG_TypeDef *rng = entropy_stm32_rng_data.rng;

#ifdef STM32_CONDRST_SUPPORT
	uint32_t desired_nist_cfg = DT_INST_PROP_OR(0, nist_config, 0U);
	uint32_t desired_htcr = DT_INST_PROP_OR(0, health_test_config, 0U);
	uint32_t cur_nist_cfg = 0U;
	uint32_t cur_htcr = 0U;

#if DT_INST_NODE_HAS_PROP(0, nist_config)
	/*
	 * Configure RNG_CR in compliance with the NIST SP 800 recommendations.
	 * The nist-config value is copied directly from the DTS.
	 * The RNG clock must be 48 MHz, otherwise the clock divider is not
	 * adapted accordingly.
	 * RNG_CR_CONDRST is set to 1 at the same time RNG_CR is written.
	 */
	cur_nist_cfg = READ_BIT(rng->CR,
				(RNG_CR_NISTC | RNG_CR_CLKDIV | RNG_CR_RNG_CONFIG1 |
				 RNG_CR_RNG_CONFIG2 | RNG_CR_RNG_CONFIG3
#if defined(RNG_CR_ARDIS)
				 | RNG_CR_ARDIS
			/* For the STM32U5 series, the ARDIS bit 7 is part of the nist-config */
#endif /* RNG_CR_ARDIS */
				));
#endif /* nist_config */

#if DT_INST_NODE_HAS_PROP(0, health_test_config)
	cur_htcr = LL_RNG_GetHealthConfig(rng);
#endif /* health_test_config */

	if (cur_nist_cfg != desired_nist_cfg || cur_htcr != desired_htcr) {
		MODIFY_REG(rng->CR, cur_nist_cfg, (desired_nist_cfg | RNG_CR_CONDRST));

#if DT_INST_NODE_HAS_PROP(0, health_test_config)
#if DT_INST_NODE_HAS_PROP(0, health_test_magic)
		LL_RNG_SetHealthConfig(rng, DT_INST_PROP(0, health_test_magic));
#endif /* health_test_magic */
		LL_RNG_SetHealthConfig(rng, desired_htcr);
#endif /* health_test_config */

		LL_RNG_DisableCondReset(rng);
		/* Wait for the conditioning reset process to complete */
		while (LL_RNG_IsEnabledCondReset(rng) == 1) {
		}
	}
#endif /* STM32_CONDRST_SUPPORT */

	LL_RNG_Enable(rng);
	ll_rng_enable_it(rng);
}
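
/*
 * The nist-config, health-test-config and health-test-magic values read
 * above come straight from the devicetree node of the RNG instance. A
 * hypothetical overlay fragment (illustrative only; the actual register
 * values are SoC-specific and must be taken from the reference manual)
 * could look like:
 *
 *	&rng {
 *		nist-config = <0x00F00D00>;
 *		health-test-config = <0x00009AAE>;
 *		status = "okay";
 *	};
 */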

static void acquire_rng(void)
{
	entropy_stm32_resume();
#if defined(CONFIG_SOC_SERIES_STM32WBX) || defined(CONFIG_STM32H7_DUAL_CORE)
	/* Lock the RNG to prevent concurrent access */
	z_stm32_hsem_lock(CFG_HW_RNG_SEMID, HSEM_LOCK_WAIT_FOREVER);
	/* RNG configuration could have been changed by the other core */
	configure_rng();
#endif /* CONFIG_SOC_SERIES_STM32WBX || CONFIG_STM32H7_DUAL_CORE */
}

static void release_rng(void)
{
	entropy_stm32_suspend();
#if defined(CONFIG_SOC_SERIES_STM32WBX) || defined(CONFIG_STM32H7_DUAL_CORE)
	z_stm32_hsem_unlock(CFG_HW_RNG_SEMID);
#endif /* CONFIG_SOC_SERIES_STM32WBX || CONFIG_STM32H7_DUAL_CORE */
}

static int entropy_stm32_got_error(RNG_TypeDef *rng)
{
	__ASSERT_NO_MSG(rng != NULL);

#if defined(STM32_CONDRST_SUPPORT)
	if (LL_RNG_IsActiveFlag_CECS(rng)) {
		return 1;
	}
#endif

	if (ll_rng_is_active_seis(rng)) {
		return 1;
	}

	return 0;
}

#if defined(STM32_CONDRST_SUPPORT)
/* SoCs with soft-reset support: execute the reset */
static int recover_seed_error(RNG_TypeDef *rng)
{
	uint32_t count_timeout = 0;

	LL_RNG_EnableCondReset(rng);
	LL_RNG_DisableCondReset(rng);
	/* When the reset process is done, the cond reset bit reads 0.
	 * This typically takes: 2 AHB clock cycles + 2 RNG clock cycles.
	 */

	while (LL_RNG_IsEnabledCondReset(rng) ||
	       ll_rng_is_active_seis(rng) ||
	       ll_rng_is_active_secs(rng)) {
		count_timeout++;
		if (count_timeout == 10) {
			return -ETIMEDOUT;
		}
	}

	return 0;
}

#else /* !STM32_CONDRST_SUPPORT */
/* SoCs without soft-reset support: flush the pipeline */
static int recover_seed_error(RNG_TypeDef *rng)
{
	ll_rng_clear_seis(rng);

	for (int i = 0; i < 12; ++i) {
		(void)ll_rng_read_rand_data(rng);
	}

	if (ll_rng_is_active_seis(rng) != 0) {
		return -EIO;
	}

	return 0;
}
#endif /* !STM32_CONDRST_SUPPORT */

static int random_byte_get(void)
{
	int retval = -EAGAIN;
	unsigned int key;
	RNG_TypeDef *rng = entropy_stm32_rng_data.rng;

	key = irq_lock();

#if defined(CONFIG_ENTROPY_STM32_CLK_CHECK)
	if (!k_is_pre_kernel()) {
		/* The CECS bit signals that a clock configuration issue was
		 * detected, which may lead to generation of non truly random
		 * data.
		 */
		__ASSERT(LL_RNG_IsActiveFlag_CECS(rng) == 0,
			 "CECS = 1: RNG domain clock is too slow.\n"
			 "\tSee ref man and update target clock configuration.");
	}
#endif /* CONFIG_ENTROPY_STM32_CLK_CHECK */

	if (ll_rng_is_active_seis(rng) && (recover_seed_error(rng) < 0)) {
		retval = -EIO;
		goto out;
	}

	if (ll_rng_is_active_drdy(rng) == 1) {
		if (entropy_stm32_got_error(rng)) {
			retval = -EIO;
			goto out;
		}

		retval = ll_rng_read_rand_data(rng);
		if (retval == 0) {
			/* A seed error could have occurred between the RNG_SR
			 * polling and the RNG_DR output reading.
			 */
			retval = -EAGAIN;
			goto out;
		}

		retval &= 0xFF;
	}

out:

	irq_unlock(key);

	return retval;
}

static uint16_t generate_from_isr(uint8_t *buf, uint16_t len)
{
	uint16_t remaining_len = len;

#if !IRQLESS_TRNG
	__ASSERT_NO_MSG(!irq_is_enabled(IRQN));
#endif /* !IRQLESS_TRNG */

#if defined(CONFIG_SOC_SERIES_STM32WBX) || defined(CONFIG_STM32H7_DUAL_CORE)
	__ASSERT_NO_MSG(z_stm32_hsem_is_owned(CFG_HW_RNG_SEMID));
#endif /* CONFIG_SOC_SERIES_STM32WBX || CONFIG_STM32H7_DUAL_CORE */

	/* Do not proceed if a seed error occurred */
	if (ll_rng_is_active_secs(entropy_stm32_rng_data.rng) ||
	    ll_rng_is_active_seis(entropy_stm32_rng_data.rng)) {

		(void)random_byte_get(); /* this will recover the error */

		return 0; /* returned count is zero: no random data available */
	}

#if !IRQLESS_TRNG
	/* Clear the NVIC pending bit. This ensures that a subsequent
	 * RNG event will set the Cortex-M single-bit event register
	 * to 1 (the bit is set when the NVIC pending IRQ status
	 * changes from 0 to 1).
	 */
	NVIC_ClearPendingIRQ(IRQN);
#endif /* !IRQLESS_TRNG */

	do {
		int byte;

		while (ll_rng_is_active_drdy(
				entropy_stm32_rng_data.rng) != 1) {
#if !IRQLESS_TRNG
			/*
			 * Enter low-power mode while waiting for the event
			 * generated by the TRNG interrupt becoming pending.
			 *
			 * To guarantee waking up from the event, the
			 * SEV-On-Pend feature must be enabled (this is done
			 * during ARCH initialization).
			 *
			 * DSB is recommended by the spec before WFE (to
			 * guarantee completion of memory transactions).
			 */
			barrier_dsync_fence_full();
			__WFE();
			__SEV();
			__WFE();
#endif /* !IRQLESS_TRNG */
		}

		byte = random_byte_get();
#if !IRQLESS_TRNG
		NVIC_ClearPendingIRQ(IRQN);
#endif /* !IRQLESS_TRNG */

		if (byte < 0) {
			continue;
		}

		buf[--remaining_len] = byte;
	} while (remaining_len);

	return len;
}

static int start_pool_filling(bool wait)
{
	unsigned int key;
	bool already_filling;

	key = irq_lock();
#if defined(CONFIG_SOC_SERIES_STM32WBX) || defined(CONFIG_STM32H7_DUAL_CORE)
	/* In non-blocking mode, return immediately if the RNG is not available */
	if (!wait && z_stm32_hsem_try_lock(CFG_HW_RNG_SEMID) != 0) {
		irq_unlock(key);
		return -EAGAIN;
	}
#else
	ARG_UNUSED(wait);
#endif /* CONFIG_SOC_SERIES_STM32WBX || CONFIG_STM32H7_DUAL_CORE */

	already_filling = entropy_stm32_rng_data.filling_pools;
	entropy_stm32_rng_data.filling_pools = true;
	irq_unlock(key);

	if (unlikely(already_filling)) {
		return 0;
	}

	/* Prevent the clocks from being stopped while the RNG pool is being
	 * populated. The ISR will release the constraint again once the
	 * RNG pool is filled.
	 */
	pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	if (IS_ENABLED(CONFIG_PM_S2RAM)) {
		pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
	}

	acquire_rng();
#if IRQLESS_TRNG
	k_work_schedule(&entropy_stm32_rng_data.trng_poll_work, TRNG_GENERATION_DELAY);
#else /* !IRQLESS_TRNG */
	irq_enable(IRQN);
#endif /* IRQLESS_TRNG */
	return 0;
}

static void pool_filling_work_handler(struct k_work *work)
{
	if (start_pool_filling(false) != 0) {
		/* RNG could not be acquired, try again */
		k_work_submit(work);
	}
}

static uint16_t rng_pool_get(struct rng_pool *rngp, uint8_t *buf,
			     uint16_t len)
{
	uint32_t last = rngp->last;
	uint32_t mask = rngp->mask;
	uint8_t *dst = buf;
	uint32_t first, available;
	uint32_t other_read_in_progress;
	unsigned int key;

	key = irq_lock();
	first = rngp->first_alloc;

	/*
	 * other_read_in_progress is non-zero if rngp->first_read != first,
	 * which means that lower-priority code (which was interrupted by this
	 * call) has already allocated an area for reading.
	 */
	other_read_in_progress = (rngp->first_read ^ first);

	available = (last - first) & mask;
	if (available < len) {
		len = available;
	}

	/*
	 * Move the alloc index forward to signal that this part of the buffer
	 * is now reserved for this call.
	 */
	rngp->first_alloc = (first + len) & mask;
	irq_unlock(key);

	while (likely(len--)) {
		*dst++ = rngp->buffer[first];
		first = (first + 1) & mask;
	}

	/*
	 * If this call is the last one accessing the pool, move the read index
	 * to signal that all allocated regions have now been read and may be
	 * overwritten.
	 */
	if (likely(!other_read_in_progress)) {
		key = irq_lock();
		rngp->first_read = rngp->first_alloc;
		irq_unlock(key);
	}

	len = dst - buf;
	available = available - len;
	if (available <= rngp->threshold) {
		/*
		 * Avoid starting pool filling from an ISR, as it might require
		 * blocking if the RNG is not available, and a race condition
		 * could also occur if this ISR has interrupted the RNG ISR.
		 *
		 * If the TRNG has no IRQ line, always schedule the work item,
		 * as this is what fills the RNG pools instead of the ISR.
		 */
		if (k_is_in_isr() || IRQLESS_TRNG) {
			k_work_submit(&entropy_stm32_rng_data.filling_work);
		} else {
			start_pool_filling(true);
		}
	}

	return len;
}

static int rng_pool_put(struct rng_pool *rngp, uint8_t byte)
{
	uint8_t first = rngp->first_read;
	uint8_t last = rngp->last;
	uint8_t mask = rngp->mask;

	/* Signal error if the pool is full. */
	if (((last - first) & mask) == mask) {
		return -ENOBUFS;
	}

	rngp->buffer[last] = byte;
	rngp->last = (last + 1) & mask;

	return 0;
}

static void rng_pool_init(struct rng_pool *rngp, uint16_t size,
			  uint8_t threshold)
{
	rngp->first_alloc = 0U;
	rngp->first_read = 0U;
	rngp->last = 0U;
	rngp->mask = size - 1;
	rngp->threshold = threshold;
}
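
/*
 * The pool is a simple ring buffer: with size = 2^n and mask = size - 1,
 * the fill level and the "full" condition reduce to masked subtraction.
 * For example, with size = 8 (mask = 0x07), first_read = 6 and last = 2:
 *
 *	available = (last - first) & mask = (2 - 6) & 0x07 = 4
 *	full when ((last - first) & mask) == mask, i.e. 7 bytes stored
 *
 * One slot is always kept empty so that "full" and "empty" can be told
 * apart.
 */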

static int perform_pool_refill(void)
{
	int byte, ret;

	byte = random_byte_get();
	if (byte < 0) {
		return -EIO;
	}

	ret = rng_pool_put((struct rng_pool *)(entropy_stm32_rng_data.isr),
			   byte);
	if (ret < 0) {
		ret = rng_pool_put(
			(struct rng_pool *)(entropy_stm32_rng_data.thr),
			byte);
		if (ret < 0) {
#if !IRQLESS_TRNG
			irq_disable(IRQN);
#endif /* !IRQLESS_TRNG */
			release_rng();
			pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
			if (IS_ENABLED(CONFIG_PM_S2RAM)) {
				pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
			}
			entropy_stm32_rng_data.filling_pools = false;
		}

		k_sem_give(&entropy_stm32_rng_data.sem_sync);
	}

	return ret;
}

#if IRQLESS_TRNG
static void trng_poll_work_item(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	RNG_TypeDef *rng = entropy_stm32_rng_data.rng;

	/* Seed error occurred: reset the TRNG and try again */
	if (ll_rng_is_active_secs(entropy_stm32_rng_data.rng) ||
	    ll_rng_is_active_seis(entropy_stm32_rng_data.rng)) {

		(void)random_byte_get(); /* this will recover the error */
	} else if (ll_rng_is_active_drdy(rng)) {
		/* Entropy available: read it and fill the pools */
		int res = perform_pool_refill();

		if (res == -ENOBUFS) {
			/*
			 * All RNG pools are full - no more work needed.
			 * Exit early to stop the work item from re-scheduling
			 * itself. The RNG peripheral has already been released
			 * by perform_pool_refill().
			 */
			return;
		}
	} else {
		/* No entropy available - try again later */
	}

	/* Schedule ourselves for the next cycle */
	k_work_schedule(dwork, TRNG_GENERATION_DELAY);
}
#else /* !IRQLESS_TRNG */
static void stm32_rng_isr(const void *arg)
{
	ARG_UNUSED(arg);

	(void)perform_pool_refill();
}
#endif /* IRQLESS_TRNG */

static int entropy_stm32_rng_get_entropy(const struct device *dev,
					 uint8_t *buf,
					 uint16_t len)
{
	/* Check if this API is called on the correct driver instance. */
	__ASSERT_NO_MSG(&entropy_stm32_rng_data == dev->data);

	while (len) {
		uint16_t bytes;

		k_sem_take(&entropy_stm32_rng_data.sem_lock, K_FOREVER);
		bytes = rng_pool_get(
			(struct rng_pool *)(entropy_stm32_rng_data.thr),
			buf, len);

		if (bytes == 0U) {
			/* Pool is empty: sleep until the next interrupt. */
			k_sem_take(&entropy_stm32_rng_data.sem_sync, K_FOREVER);
		}

		k_sem_give(&entropy_stm32_rng_data.sem_lock);

		len -= bytes;
		buf += bytes;
	}

	return 0;
}
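
/*
 * From application code this entry point is reached through the generic
 * Zephyr entropy API. A minimal sketch, assuming the RNG node is set as
 * the zephyr,entropy chosen node:
 *
 *	const struct device *rng_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_entropy));
 *	uint8_t seed[16];
 *
 *	if (device_is_ready(rng_dev) &&
 *	    entropy_get_entropy(rng_dev, seed, sizeof(seed)) == 0) {
 *		seed[] now holds 16 bytes drawn from the thread-mode pool
 *	}
 */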

static int entropy_stm32_rng_get_entropy_isr(const struct device *dev,
					     uint8_t *buf,
					     uint16_t len,
					     uint32_t flags)
{
	uint16_t cnt = len;

	/* Check if this API is called on the correct driver instance. */
	__ASSERT_NO_MSG(&entropy_stm32_rng_data == dev->data);

	if (likely((flags & ENTROPY_BUSYWAIT) == 0U)) {
		return rng_pool_get(
			(struct rng_pool *)(entropy_stm32_rng_data.isr),
			buf, len);
	}

	if (len) {
		/*
		 * On a TRNG without an interrupt line, reentrancy cannot be
		 * allowed, so all interrupts have to stay masked. Otherwise,
		 * interrupts only need to stay masked until this code has
		 * established itself as owner of the TRNG, to prevent a race
		 * with a higher-priority interrupt handler.
		 */
		unsigned int key = irq_lock();
		bool rng_already_acquired = false;
#if !IRQLESS_TRNG
		int irq_enabled = irq_is_enabled(IRQN);

		rng_already_acquired = (irq_enabled != 0);
		irq_disable(IRQN);
		irq_unlock(key);
#endif /* !IRQLESS_TRNG */

		/* Do not release if the IRQ is enabled. The RNG will be
		 * released in the ISR when the pools are full. On a TRNG
		 * without an interrupt line, the default value of false
		 * ensures the TRNG is always released.
		 */
		if (z_stm32_hsem_is_owned(CFG_HW_RNG_SEMID)) {
			rng_already_acquired = true;
		}
		acquire_rng();

		cnt = generate_from_isr(buf, len);

		/* Restore the state of the RNG lock and IRQ */
		if (!rng_already_acquired) {
			release_rng();
		}

#if IRQLESS_TRNG
		/* Exit critical section */
		irq_unlock(key);
#else
		if (irq_enabled) {
			irq_enable(IRQN);
		}
#endif /* !IRQLESS_TRNG */
	}

	return cnt;
}
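
/*
 * The ISR-safe path is reached through entropy_get_entropy_isr(). A short
 * sketch (illustrative; rng_dev obtained as in the previous sketch),
 * requesting blocking generation from interrupt context:
 *
 *	uint8_t nonce[8];
 *	int n = entropy_get_entropy_isr(rng_dev, nonce, sizeof(nonce),
 *					ENTROPY_BUSYWAIT);
 *
 * Without ENTROPY_BUSYWAIT the call only drains the ISR pool and may
 * return fewer bytes than requested (n is the number of bytes written,
 * or a negative error code).
 */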

static int entropy_stm32_rng_init(const struct device *dev)
{
	struct entropy_stm32_rng_dev_data *dev_data;
	const struct entropy_stm32_rng_dev_cfg *dev_cfg;
	int res;

	__ASSERT_NO_MSG(dev != NULL);

	dev_data = dev->data;
	dev_cfg = dev->config;

	__ASSERT_NO_MSG(dev_data != NULL);
	__ASSERT_NO_MSG(dev_cfg != NULL);

	dev_data->clock = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);

	if (!device_is_ready(dev_data->clock)) {
		return -ENODEV;
	}

	res = clock_control_on(dev_data->clock,
			       (clock_control_subsys_t)&dev_cfg->pclken[0]);
	__ASSERT_NO_MSG(res == 0);

	/* Configure the domain clock, if any */
	if (DT_INST_NUM_CLOCKS(0) > 1) {
		res = clock_control_configure(dev_data->clock,
					      (clock_control_subsys_t)&dev_cfg->pclken[1],
					      NULL);
		__ASSERT(res == 0, "Could not select RNG domain clock");
	}

	/* Locking semaphore initialized to 1 (unlocked) */
	k_sem_init(&dev_data->sem_lock, 1, 1);

	/* Synchronization semaphore */
	k_sem_init(&dev_data->sem_sync, 0, 1);

	k_work_init(&dev_data->filling_work, pool_filling_work_handler);

#if IRQLESS_TRNG
	k_work_init_delayable(&dev_data->trng_poll_work, trng_poll_work_item);
#endif /* IRQLESS_TRNG */

	rng_pool_init((struct rng_pool *)(dev_data->thr),
		      CONFIG_ENTROPY_STM32_THR_POOL_SIZE,
		      CONFIG_ENTROPY_STM32_THR_THRESHOLD);
	rng_pool_init((struct rng_pool *)(dev_data->isr),
		      CONFIG_ENTROPY_STM32_ISR_POOL_SIZE,
		      CONFIG_ENTROPY_STM32_ISR_THRESHOLD);

#if !IRQLESS_TRNG
	IRQ_CONNECT(IRQN, IRQ_PRIO, stm32_rng_isr, &entropy_stm32_rng_data, 0);
#endif /* !IRQLESS_TRNG */

#if !defined(CONFIG_SOC_SERIES_STM32WBX) && !defined(CONFIG_STM32H7_DUAL_CORE)
	/* On multi-core MCUs, RNG configuration is instead performed
	 * automatically after acquiring the RNG in start_pool_filling().
	 */
	configure_rng();
#endif /* !CONFIG_SOC_SERIES_STM32WBX && !CONFIG_STM32H7_DUAL_CORE */

	start_pool_filling(true);

	return 0;
}

#ifdef CONFIG_PM_DEVICE
static int entropy_stm32_rng_pm_action(const struct device *dev,
				       enum pm_device_action action)
{
	struct entropy_stm32_rng_dev_data *dev_data = dev->data;

	int res = 0;

	/* Remove warning on some platforms */
	ARG_UNUSED(dev_data);

	switch (action) {
	case PM_DEVICE_ACTION_SUSPEND:
#if defined(CONFIG_SOC_SERIES_STM32WBX) || defined(CONFIG_STM32H7_DUAL_CORE)
		/* Lock to prevent concurrent access with PM */
		z_stm32_hsem_lock(CFG_HW_RNG_SEMID, HSEM_LOCK_WAIT_FOREVER);
		/* Call release_rng instead of entropy_stm32_suspend to avoid a double hsem_unlock */
#endif /* CONFIG_SOC_SERIES_STM32WBX || CONFIG_STM32H7_DUAL_CORE */
		release_rng();
		break;
	case PM_DEVICE_ACTION_RESUME:
		if (IS_ENABLED(CONFIG_PM_S2RAM)) {
#if DT_INST_NODE_HAS_PROP(0, health_test_config)
			entropy_stm32_resume();
#if DT_INST_NODE_HAS_PROP(0, health_test_magic)
			LL_RNG_SetHealthConfig(dev_data->rng, DT_INST_PROP(0, health_test_magic));
#endif /* health_test_magic */
			if (LL_RNG_GetHealthConfig(dev_data->rng) !=
			    DT_INST_PROP_OR(0, health_test_config, 0U)) {
				entropy_stm32_rng_init(dev);
			} else if (!entropy_stm32_rng_data.filling_pools) {
				/* The RNG was resumed above to check its configuration;
				 * release it again since the pools are not being filled.
				 */
#if defined(CONFIG_SOC_SERIES_STM32WBX) || defined(CONFIG_STM32H7_DUAL_CORE)
				/* Lock to prevent concurrent access with PM */
				z_stm32_hsem_lock(CFG_HW_RNG_SEMID, HSEM_LOCK_WAIT_FOREVER);
				/*
				 * Call release_rng instead of entropy_stm32_suspend
				 * to avoid a double hsem_unlock
				 */
#endif /* CONFIG_SOC_SERIES_STM32WBX || CONFIG_STM32H7_DUAL_CORE */
				release_rng();
			}
#endif /* health_test_config */
		} else {
			/* Resume the RNG only if it was suspended while filling the pools */
			if (entropy_stm32_rng_data.filling_pools) {
				res = entropy_stm32_resume();
			}
		}
		break;
	default:
		return -ENOTSUP;
	}

	return res;
}
#endif /* CONFIG_PM_DEVICE */

static DEVICE_API(entropy, entropy_stm32_rng_api) = {
	.get_entropy = entropy_stm32_rng_get_entropy,
	.get_entropy_isr = entropy_stm32_rng_get_entropy_isr
};

PM_DEVICE_DT_INST_DEFINE(0, entropy_stm32_rng_pm_action);

DEVICE_DT_INST_DEFINE(0,
		      entropy_stm32_rng_init,
		      PM_DEVICE_DT_INST_GET(0),
		      &entropy_stm32_rng_data, &entropy_stm32_rng_config,
		      PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY,
		      &entropy_stm32_rng_api);