/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 * Copyright (c) 2017 Exati Tecnologia Ltda.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/drivers/entropy.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/atomic.h>
#include <soc.h>
#include <hal/nrf_rng.h>
#include <zephyr/irq.h>

#define DT_DRV_COMPAT	nordic_nrf_rng

#define IRQN		DT_INST_IRQN(0)
#define IRQ_PRIO	DT_INST_IRQ(0, priority)

/*
 * The nRF5 RNG HW has several characteristics that need to be taken
 * into account by the driver to achieve energy-efficient generation
 * of entropy.
 *
 * The RNG does not support continuously DMA'ing entropy into RAM;
 * values must be read out by the CPU byte-by-byte. But once started,
 * it will continue to generate bytes until stopped.
 *
 * The generation time for byte 0 after starting generation (with BIAS
 * correction) is:
 *
 * nRF51822 - 677us
 * nRF52810 - 248us
 * nRF52840 - 248us
 *
 * The generation time for byte N >= 1 after starting generation (with
 * BIAS correction) is:
 *
 * nRF51822 - 677us
 * nRF52810 - 120us
 * nRF52840 - 120us
 *
 * Due to the first byte in a stream of bytes being more costly on
 * some platforms, a "water system" inspired algorithm is used to
 * amortize the cost of the first byte.
 *
 * The algorithm will delay generation of entropy until the number of
 * available bytes goes below THRESHOLD, at which point it will
 * generate entropy until the BUF_LEN limit is reached.
 *
 * The entropy level is checked at the end of every consumption of
 * entropy.
 *
 * The algorithm and HW together have these characteristics:
 *
 * Setting a low threshold will highly amortize the extra 120us cost
 * of the first byte on nRF52.
 *
 * Setting a high threshold will minimize the time spent waiting for
 * entropy.
 *
 * To minimize power consumption, the threshold should be set either
 * low or high, depending on the HFCLK-usage pattern of other
 * components.
 *
 * If the threshold is set close to BUF_LEN, and the system happens
 * to be using the HFCLK anyway for several hundred us after entropy
 * is requested, there will be no extra current consumption for
 * keeping clocks running for entropy generation.
 *
 */

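/*
 * Ring buffer holding pre-generated entropy. Indices wrap using
 * 'mask', which is why the pool sizes must be powers of 2. 'last' is
 * where the ISR writes next, 'first_alloc' marks bytes reserved by a
 * reader, and 'first_read' marks bytes fully consumed. Generation is
 * restarted whenever the available bytes drop to 'threshold' or below.
 */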
struct rng_pool {
	uint8_t first_alloc;
	uint8_t first_read;
	uint8_t last;
	uint8_t mask;
	uint8_t threshold;
	uint8_t buffer[0];
};

#define RNG_POOL_DEFINE(name, len) uint8_t name[sizeof(struct rng_pool) + (len)]

BUILD_ASSERT((CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE &
	      (CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE - 1)) == 0,
	     "The CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE must be a power of 2!");

BUILD_ASSERT((CONFIG_ENTROPY_NRF5_THR_POOL_SIZE &
	      (CONFIG_ENTROPY_NRF5_THR_POOL_SIZE - 1)) == 0,
	     "The CONFIG_ENTROPY_NRF5_THR_POOL_SIZE must be a power of 2!");

struct entropy_nrf5_dev_data {
	struct k_sem sem_lock;
	struct k_sem sem_sync;

	RNG_POOL_DEFINE(isr, CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE);
	RNG_POOL_DEFINE(thr, CONFIG_ENTROPY_NRF5_THR_POOL_SIZE);
};

static struct entropy_nrf5_dev_data entropy_nrf5_data;

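/*
 * Read a single byte from the RNG peripheral if one is ready.
 * Returns the byte value, or -EAGAIN when no VALRDY event is pending.
 */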
static int random_byte_get(void)
{
	int retval = -EAGAIN;
	unsigned int key;

	key = irq_lock();

	if (nrf_rng_event_check(NRF_RNG, NRF_RNG_EVENT_VALRDY)) {
		retval = nrf_rng_random_value_get(NRF_RNG);
		nrf_rng_event_clear(NRF_RNG, NRF_RNG_EVENT_VALRDY);
	}

	irq_unlock(key);

	return retval;
}

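/*
 * Fetch up to 'len' bytes from the pool into 'buf'. May be preempted
 * by a higher-priority reader, hence the separate alloc/read indices.
 * Restarts the RNG when the remaining bytes fall to the threshold or
 * below. Returns the number of bytes actually copied.
 */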
static uint16_t rng_pool_get(struct rng_pool *rngp, uint8_t *buf, uint16_t len)
{
	uint32_t last  = rngp->last;
	uint32_t mask  = rngp->mask;
	uint8_t *dst   = buf;
	uint32_t first, available;
	uint32_t other_read_in_progress;
	unsigned int key;

	key = irq_lock();
	first = rngp->first_alloc;

	/*
	 * The other_read_in_progress is non-zero if rngp->first_read != first,
	 * which means that lower-priority code (which was interrupted by this
	 * call) already allocated an area for reading.
	 */
	other_read_in_progress = (rngp->first_read ^ first);

	available = (last - first) & mask;
	if (available < len) {
		len = available;
	}

	/*
	 * Move the alloc index forward to signal that this part of the buffer
	 * is now reserved for this call.
	 */
	rngp->first_alloc = (first + len) & mask;
	irq_unlock(key);

	while (likely(len--)) {
		*dst++ = rngp->buffer[first];
		first = (first + 1) & mask;
	}

	/*
	 * If this call is the last one accessing the pool, move read index
	 * to signal that all allocated regions are now read and could be
	 * overwritten.
	 */
	if (likely(!other_read_in_progress)) {
		key = irq_lock();
		rngp->first_read = rngp->first_alloc;
		irq_unlock(key);
	}

	len = dst - buf;
	available = available - len;
	if (available <= rngp->threshold) {
		nrf_rng_task_trigger(NRF_RNG, NRF_RNG_TASK_START);
	}

	return len;
}

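/*
 * Store one byte in the pool. Returns 0 on success or -ENOBUFS if the
 * pool is already full. Called from the RNG interrupt handler.
 */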
static int rng_pool_put(struct rng_pool *rngp, uint8_t byte)
{
	uint8_t first = rngp->first_read;
	uint8_t last  = rngp->last;
	uint8_t mask  = rngp->mask;

	/* Signal error if the pool is full. */
	if (((last - first) & mask) == mask) {
		return -ENOBUFS;
	}

	rngp->buffer[last] = byte;
	rngp->last = (last + 1) & mask;

	return 0;
}

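/* Reset the pool indices and set its size mask and refill threshold. */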
static void rng_pool_init(struct rng_pool *rngp, uint16_t size, uint8_t threshold)
{
	rngp->first_alloc = 0U;
	rngp->first_read  = 0U;
	rngp->last	  = 0U;
	rngp->mask	  = size - 1;
	rngp->threshold	  = threshold;
}

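/*
 * RNG interrupt handler: reads one byte per VALRDY event, filling the
 * ISR pool first and the thread pool second. The RNG is stopped once
 * both pools are full. The sync semaphore is given whenever the ISR
 * pool is full, so threads blocked in get_entropy() can retry.
 */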
static void isr(const void *arg)
{
	int byte, ret;

	ARG_UNUSED(arg);

	byte = random_byte_get();
	if (byte < 0) {
		return;
	}

	ret = rng_pool_put((struct rng_pool *)(entropy_nrf5_data.isr), byte);
	if (ret < 0) {
		ret = rng_pool_put((struct rng_pool *)(entropy_nrf5_data.thr),
				   byte);
		if (ret < 0) {
			nrf_rng_task_trigger(NRF_RNG, NRF_RNG_TASK_STOP);
		}

		k_sem_give(&entropy_nrf5_data.sem_sync);
	}
}

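/*
 * Thread-context entropy API: drains the thread pool under the lock
 * semaphore and sleeps on the sync semaphore whenever the pool runs
 * empty, until 'len' bytes have been delivered.
 */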
static int entropy_nrf5_get_entropy(const struct device *dev, uint8_t *buf,
				    uint16_t len)
{
	/* Check if this API is called on correct driver instance. */
	__ASSERT_NO_MSG(&entropy_nrf5_data == dev->data);

	while (len) {
		uint16_t bytes;

		k_sem_take(&entropy_nrf5_data.sem_lock, K_FOREVER);
		bytes = rng_pool_get((struct rng_pool *)(entropy_nrf5_data.thr),
				     buf, len);
		k_sem_give(&entropy_nrf5_data.sem_lock);

		if (bytes == 0U) {
			/* Pool is empty: Sleep until next interrupt. */
			k_sem_take(&entropy_nrf5_data.sem_sync, K_FOREVER);
			continue;
		}

		len -= bytes;
		buf += bytes;
	}

	return 0;
}

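/*
 * ISR-safe entropy API: without ENTROPY_BUSYWAIT it returns whatever
 * the ISR pool currently holds (possibly fewer than 'len' bytes).
 * With ENTROPY_BUSYWAIT it temporarily disables the RNG IRQ and polls
 * VALRDY, idling the CPU between bytes, until 'len' bytes are read.
 */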
static int entropy_nrf5_get_entropy_isr(const struct device *dev,
					uint8_t *buf, uint16_t len,
					uint32_t flags)
{
	uint16_t cnt = len;

	/* Check if this API is called on correct driver instance. */
	__ASSERT_NO_MSG(&entropy_nrf5_data == dev->data);

	if (likely((flags & ENTROPY_BUSYWAIT) == 0U)) {
		return rng_pool_get((struct rng_pool *)(entropy_nrf5_data.isr),
				    buf, len);
	}

	if (len) {
		unsigned int key;
		int irq_enabled;

		key = irq_lock();
		irq_enabled = irq_is_enabled(IRQN);
		irq_disable(IRQN);
		irq_unlock(key);

		nrf_rng_event_clear(NRF_RNG, NRF_RNG_EVENT_VALRDY);
		nrf_rng_task_trigger(NRF_RNG, NRF_RNG_TASK_START);

		/* Clear NVIC pending bit. This ensures that a subsequent
		 * RNG event will set the Cortex-M single-bit event register
		 * to 1 (the bit is set when NVIC pending IRQ status is
		 * changed from 0 to 1)
		 */
		NVIC_ClearPendingIRQ(IRQN);

		do {
			int byte;

			while (!nrf_rng_event_check(NRF_RNG,
						    NRF_RNG_EVENT_VALRDY)) {
				k_cpu_atomic_idle(irq_lock());
			}

			byte = random_byte_get();
			NVIC_ClearPendingIRQ(IRQN);

			if (byte < 0) {
				continue;
			}

			buf[--len] = byte;
		} while (len);

		if (irq_enabled) {
			irq_enable(IRQN);
		}
	}

	return cnt;
}

static int entropy_nrf5_init(const struct device *dev);

static const struct entropy_driver_api entropy_nrf5_api_funcs = {
	.get_entropy = entropy_nrf5_get_entropy,
	.get_entropy_isr = entropy_nrf5_get_entropy_isr
};

DEVICE_DT_INST_DEFINE(0,
		    entropy_nrf5_init, NULL,
		    &entropy_nrf5_data, NULL,
		    PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY,
		    &entropy_nrf5_api_funcs);

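/*
 * Driver init: sets up the semaphores and both pools, programs bias
 * correction according to Kconfig, then starts the RNG and enables
 * its interrupt.
 */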
static int entropy_nrf5_init(const struct device *dev)
{
	/* Check if this API is called on correct driver instance. */
	__ASSERT_NO_MSG(&entropy_nrf5_data == dev->data);

	/* Locking semaphore initialized to 1 (unlocked) */
	k_sem_init(&entropy_nrf5_data.sem_lock, 1, 1);

	/* Synchronization semaphore */
	k_sem_init(&entropy_nrf5_data.sem_sync, 0, 1);

	rng_pool_init((struct rng_pool *)(entropy_nrf5_data.thr),
		      CONFIG_ENTROPY_NRF5_THR_POOL_SIZE,
		      CONFIG_ENTROPY_NRF5_THR_THRESHOLD);
	rng_pool_init((struct rng_pool *)(entropy_nrf5_data.isr),
		      CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE,
		      CONFIG_ENTROPY_NRF5_ISR_THRESHOLD);

	/* Enable or disable bias correction */
	if (IS_ENABLED(CONFIG_ENTROPY_NRF5_BIAS_CORRECTION)) {
		nrf_rng_error_correction_enable(NRF_RNG);
	} else {
		nrf_rng_error_correction_disable(NRF_RNG);
	}

	nrf_rng_event_clear(NRF_RNG, NRF_RNG_EVENT_VALRDY);
	nrf_rng_int_enable(NRF_RNG, NRF_RNG_INT_VALRDY_MASK);
	nrf_rng_task_trigger(NRF_RNG, NRF_RNG_TASK_START);

	IRQ_CONNECT(IRQN, IRQ_PRIO, isr, &entropy_nrf5_data, 0);
	irq_enable(IRQN);

	return 0;
}