1 /*
2 * Copyright (c) 2019 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 /**
8 * @file
9 * @brief System/hardware module for Nordic Semiconductor nRF53 family processor
10 *
11 * This module provides routines to initialize and support board-level hardware
12 * for the Nordic Semiconductor nRF53 family processor.
13 */
14
15 #include <zephyr/kernel.h>
16 #include <zephyr/init.h>
17 #include <zephyr/sys/barrier.h>
18 #include <soc/nrfx_coredep.h>
19 #include <zephyr/logging/log.h>
20 #include <nrf_erratas.h>
21 #include <hal/nrf_power.h>
22 #include <hal/nrf_ipc.h>
23 #include <helpers/nrfx_gppi.h>
24 #if defined(CONFIG_SOC_NRF5340_CPUAPP)
25 #include <zephyr/drivers/gpio.h>
26 #include <zephyr/devicetree.h>
27 #include <hal/nrf_cache.h>
28 #include <hal/nrf_gpio.h>
29 #include <hal/nrf_oscillators.h>
30 #include <hal/nrf_regulators.h>
31 #elif defined(CONFIG_SOC_NRF5340_CPUNET)
32 #include <hal/nrf_nvmc.h>
33 #endif
34 #include <hal/nrf_wdt.h>
35 #include <hal/nrf_rtc.h>
36 #include <soc_secure.h>
37
38 #include <cmsis_core.h>
39
40 #define PIN_XL1 0
41 #define PIN_XL2 1
42
43 #define RTC1_PRETICK_CC_CHAN (RTC1_CC_NUM - 1)
44
45 /* Mask of CC channels capable of generating interrupts, see nrf_rtc_timer.c */
46 #define RTC1_PRETICK_SELECTED_CC_MASK BIT_MASK(CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT + 1U)
47 #define RTC0_PRETICK_SELECTED_CC_MASK BIT_MASK(NRF_RTC_CC_COUNT_MAX)
48
49 #if defined(CONFIG_SOC_NRF_GPIO_FORWARDER_FOR_NRF5340)
50 #define GPIOS_PSEL_BY_IDX(node_id, prop, idx) \
51 NRF_DT_GPIOS_TO_PSEL_BY_IDX(node_id, prop, idx),
52 #define ALL_GPIOS_IN_NODE(node_id) \
53 DT_FOREACH_PROP_ELEM(node_id, gpios, GPIOS_PSEL_BY_IDX)
54 #define ALL_GPIOS_IN_FORWARDER(node_id) \
55 DT_FOREACH_CHILD(node_id, ALL_GPIOS_IN_NODE)
56 #endif
57
58 #define LOG_LEVEL CONFIG_SOC_LOG_LEVEL
59 LOG_MODULE_REGISTER(soc);
60
61
62 #if defined(CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND)
/* Workaround for anomaly 160: prevent the CPU from entering sleep again if
 * it already entered sleep 5 times within the last 200 us.
 * Returns true when sleeping is allowed, false when the limit would be
 * exceeded. Also consumes the SLEEPENTER event to detect whether the CPU
 * actually slept since the previous call.
 */
static bool nrf53_anomaly_160_check(void)
{
	/* System clock cycles needed to cover 200 us window. */
	const uint32_t window_cycles =
		DIV_ROUND_UP(200 * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC,
			     1000000);
	/* Ring buffer holding the timestamps of the last 5 sleep entries. */
	static uint32_t timestamps[5];
	/* True once the ring buffer has wrapped at least once, i.e. all
	 * 5 entries hold valid timestamps.
	 */
	static bool timestamps_filled;
	static uint8_t current;
	uint8_t oldest = (current + 1) % ARRAY_SIZE(timestamps);
	uint32_t now = k_cycle_get_32();

	if (timestamps_filled &&
	    /* + 1 because only fully elapsed cycles need to be counted. */
	    (now - timestamps[oldest]) < (window_cycles + 1)) {
		return false;
	}

	/* Check if the CPU actually entered sleep since the last visit here
	 * (WFE/WFI could return immediately if the wake-up event was already
	 * registered).
	 */
	if (nrf_power_event_check(NRF_POWER, NRF_POWER_EVENT_SLEEPENTER)) {
		nrf_power_event_clear(NRF_POWER, NRF_POWER_EVENT_SLEEPENTER);
		/* If so, update the index at which the current timestamp is
		 * to be stored so that it replaces the oldest one, otherwise
		 * (when the CPU did not sleep), the recently stored timestamp
		 * is updated.
		 */
		current = oldest;
		if (current == 0) {
			timestamps_filled = true;
		}
	}

	timestamps[current] = k_cycle_get_32();

	return true;
}
105 #endif /* CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND */
106
107 #if defined(CONFIG_SOC_NRF53_RTC_PRETICK) && defined(CONFIG_SOC_NRF5340_CPUNET)
108
109 BUILD_ASSERT(!IS_ENABLED(CONFIG_WDT_NRFX),
110 "For CONFIG_SOC_NRF53_RTC_PRETICK watchdog is used internally for the pre-tick workaround on nRF5340 cpunet. Application cannot use the watchdog.");
111
/* Subtract two RTC COUNTER values modulo the counter width, yielding the
 * number of ticks from @p b to @p a with wrap-around handled.
 */
static inline uint32_t rtc_counter_sub(uint32_t a, uint32_t b)
{
	uint32_t diff = a - b;

	return diff & NRF_RTC_COUNTER_MAX;
}
116
/* Compute the number of RTC ticks until the earliest event on @p rtc that is
 * able to generate an interrupt: either counter overflow or a compare (CC)
 * channel from @p selected_cc_mask whose interrupt is enabled. @p cntr is the
 * current COUNTER value. On success stores the tick count in
 * @p ticks_to_next_event and returns true; returns false when no such event
 * is enabled.
 */
static bool rtc_ticks_to_next_event_get(NRF_RTC_Type *rtc, uint32_t selected_cc_mask, uint32_t cntr,
					uint32_t *ticks_to_next_event)
{
	bool result = false;

	/* Let's preload register to speed-up. */
	uint32_t reg_intenset = rtc->INTENSET;

	/* Note: TICK event not handled. */

	if (reg_intenset & NRF_RTC_INT_OVERFLOW_MASK) {
		/* Overflow can generate an interrupt. */
		*ticks_to_next_event = NRF_RTC_COUNTER_MAX + 1U - cntr;
		result = true;
	}

	for (uint32_t chan = 0; chan < NRF_RTC_CC_COUNT_MAX; chan++) {
		if ((selected_cc_mask & (1U << chan)) &&
		    (reg_intenset & NRF_RTC_CHANNEL_INT_MASK(chan))) {
			/* The CC is in the selected mask and can generate an interrupt. */
			uint32_t cc = nrf_rtc_cc_get(rtc, chan);
			uint32_t ticks_to_fire = rtc_counter_sub(cc, cntr);

			if (ticks_to_fire == 0U) {
				/* When ticks_to_fire == 0, the event should have just been
				 * generated; the interrupt can be already handled or pending.
				 * However the next event is expected to be after counter wraps.
				 */
				ticks_to_fire = NRF_RTC_COUNTER_MAX + 1U;
			}

			if (!result) {
				/* First interrupt-capable CC found. */
				*ticks_to_next_event = ticks_to_fire;
				result = true;
			} else if (ticks_to_fire < *ticks_to_next_event) {
				/* Earlier than any event found so far. */
				*ticks_to_next_event = ticks_to_fire;
				result = true;
			} else {
				/* CC that fires no earlier than already found. */
			}
		}
	}

	return result;
}
162
/* Read the COUNTER values of two RTC instances so that both samples belong
 * to the same RTC tick: retry until rtc_a's counter is unchanged across the
 * read of rtc_b. The data memory barriers keep the peripheral reads in
 * program order.
 */
static void rtc_counter_synchronized_get(NRF_RTC_Type *rtc_a, NRF_RTC_Type *rtc_b,
					 uint32_t *counter_a, uint32_t *counter_b)
{
	do {
		*counter_a = nrf_rtc_counter_get(rtc_a);
		barrier_dmem_fence_full();
		*counter_b = nrf_rtc_counter_get(rtc_b);
		barrier_dmem_fence_full();
	} while (*counter_a != nrf_rtc_counter_get(rtc_a));
}
173
/* Dummy byte used only as the LDREXB/STREXB target for detecting whether an
 * exception preempted the idle-preparation sequence; its value is irrelevant.
 */
static uint8_t cpu_idle_prepare_monitor_dummy;
/* Verdict computed by z_arm_on_enter_cpu_idle_prepare(), consumed by
 * z_arm_on_enter_cpu_idle().
 */
static bool cpu_idle_prepare_allows_sleep;
176
/* Open the local exclusive monitor with LDREXB on a dummy byte. Any exception
 * taken afterwards clears the monitor, which cpu_idle_prepare_monitor_end()
 * detects through a failing STREXB.
 */
static void cpu_idle_prepare_monitor_begin(void)
{
	__LDREXB(&cpu_idle_prepare_monitor_dummy);
}
181
/* Returns 0 if no exception preempted since the last call to
 * cpu_idle_prepare_monitor_begin, non-zero otherwise.
 */
static bool cpu_idle_prepare_monitor_end(void)
{
	/* The value stored is irrelevant. If any exception took place after
	 * cpu_idle_prepare_monitor_begin, the local monitor is cleared and
	 * the store fails returning 1.
	 * See Arm v8-M Architecture Reference Manual:
	 * Chapter B9.2 The local monitors
	 * Chapter B9.4 Exclusive access instructions and the monitors
	 * See Arm Cortex-M33 Processor Technical Reference Manual
	 * Chapter 3.5 Exclusive monitor
	 */
	return __STREXB(0U, &cpu_idle_prepare_monitor_dummy);
}
196
/* Finish a previously armed pretick: disable publishing of the IPC receive
 * event on the pretick channel and clear the pending RTC1 pretick compare
 * event, so the hardware chain set up in rtc_pretick_cpunet_init() does not
 * retrigger from stale state.
 */
static void rtc_pretick_finish_previous(void)
{
	NRF_IPC->PUBLISH_RECEIVE[CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET] &=
			~IPC_PUBLISH_RECEIVE_EN_Msk;

	nrf_rtc_event_clear(NRF_RTC1, NRF_RTC_CHANNEL_EVENT_ADDR(RTC1_PRETICK_CC_CHAN));
}
204
205
/* Architecture idle hook, part of the nRF5340 network-core RTC pretick
 * workaround. Finds the earliest interrupt-capable event on RTC0/RTC1 and
 * programs the spare RTC1 compare channel (RTC1_PRETICK_CC_CHAN) to fire one
 * tick before it. Stores the verdict in cpu_idle_prepare_allows_sleep, which
 * z_arm_on_enter_cpu_idle() consumes; the LDREX-based monitor detects any
 * exception taken between the two hooks.
 */
void z_arm_on_enter_cpu_idle_prepare(void)
{
	bool ok_to_sleep = true;

	cpu_idle_prepare_monitor_begin();

	uint32_t rtc_counter = 0U;
	uint32_t rtc_ticks_to_next_event = 0U;
	uint32_t rtc0_counter = 0U;
	uint32_t rtc0_ticks_to_next_event = 0U;

	/* Sample both counters within the same RTC tick so the two "ticks to
	 * next event" values are comparable.
	 */
	rtc_counter_synchronized_get(NRF_RTC1, NRF_RTC0, &rtc_counter, &rtc0_counter);

	bool rtc_scheduled = rtc_ticks_to_next_event_get(NRF_RTC1, RTC1_PRETICK_SELECTED_CC_MASK,
							 rtc_counter, &rtc_ticks_to_next_event);

	if (rtc_ticks_to_next_event_get(NRF_RTC0, RTC0_PRETICK_SELECTED_CC_MASK, rtc0_counter,
					&rtc0_ticks_to_next_event)) {
		/* An event is scheduled on RTC0. */
		if (!rtc_scheduled) {
			rtc_ticks_to_next_event = rtc0_ticks_to_next_event;
			rtc_scheduled = true;
		} else if (rtc0_ticks_to_next_event < rtc_ticks_to_next_event) {
			rtc_ticks_to_next_event = rtc0_ticks_to_next_event;
		} else {
			/* Event on RTC0 will not happen earlier than already found. */
		}
	}

	if (rtc_scheduled) {
		static bool rtc_pretick_cc_set_on_time;
		/* The pretick should happen 1 tick before the earliest scheduled event
		 * that can trigger an interrupt.
		 */
		uint32_t rtc_pretick_cc_val = (rtc_counter + rtc_ticks_to_next_event - 1U)
					      & NRF_RTC_COUNTER_MAX;

		if (rtc_pretick_cc_val != nrf_rtc_cc_get(NRF_RTC1, RTC1_PRETICK_CC_CHAN)) {
			/* The CC for pretick needs to be updated. */
			rtc_pretick_finish_previous();
			nrf_rtc_cc_set(NRF_RTC1, RTC1_PRETICK_CC_CHAN, rtc_pretick_cc_val);

			if (rtc_ticks_to_next_event >= NRF_RTC_COUNTER_MAX/2) {
				/* Pretick is scheduled so far in the future, assumed on time. */
				rtc_pretick_cc_set_on_time = true;
			} else {
				/* Let's check if we updated CC on time, so that the CC can
				 * take effect.
				 */
				barrier_dmem_fence_full();
				rtc_counter = nrf_rtc_counter_get(NRF_RTC1);
				uint32_t pretick_cc_to_counter =
					rtc_counter_sub(rtc_pretick_cc_val, rtc_counter);

				if ((pretick_cc_to_counter < 3) ||
				    (pretick_cc_to_counter >= NRF_RTC_COUNTER_MAX/2)) {
					/* The COUNTER value is close enough to the expected
					 * pretick CC or has just expired, so the pretick event
					 * generation is not guaranteed.
					 */
					rtc_pretick_cc_set_on_time = false;
				} else {
					/* The written rtc_pretick_cc is guaranteed to trigger
					 * compare event.
					 */
					rtc_pretick_cc_set_on_time = true;
				}
			}
		} else {
			/* The CC for pretick doesn't need to be updated, however
			 * rtc_pretick_cc_set_on_time still holds if we managed to set it on time.
			 */
		}

		/* Allow sleep only if the pretick CC was set early enough for its
		 * compare event to be reliably generated. Otherwise (CC written
		 * too close to its fire time) do not sleep at all.
		 */
		ok_to_sleep = rtc_pretick_cc_set_on_time;
	} else {
		/* No events on any RTC timers are scheduled. */
	}

	if (ok_to_sleep) {
		/* Re-enable publishing of the pretick IPC event and stop the
		 * watchdog unless the pretick compare event already fired.
		 */
		NRF_IPC->PUBLISH_RECEIVE[CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET] |=
			IPC_PUBLISH_RECEIVE_EN_Msk;
		if (!nrf_rtc_event_check(NRF_RTC1,
					 NRF_RTC_CHANNEL_EVENT_ADDR(RTC1_PRETICK_CC_CHAN))) {
			NRF_WDT->TASKS_STOP = 1;
			/* Check if any event did not occur after we checked for
			 * stopping condition. If yes, we might have stopped WDT
			 * when it should be running. Restart it.
			 */
			if (nrf_rtc_event_check(NRF_RTC1,
						NRF_RTC_CHANNEL_EVENT_ADDR(RTC1_PRETICK_CC_CHAN))) {
				NRF_WDT->TASKS_START = 1;
			}
		}
	}

	cpu_idle_prepare_allows_sleep = ok_to_sleep;
}
308 #endif /* CONFIG_SOC_NRF53_RTC_PRETICK && CONFIG_SOC_NRF5340_CPUNET */
309
310 #if defined(CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND) || \
311 (defined(CONFIG_SOC_NRF53_RTC_PRETICK) && defined(CONFIG_SOC_NRF5340_CPUNET))
/* Final architecture hook before WFE/WFI. Returning false aborts the sleep
 * attempt for this iteration of the idle loop. Combines the RTC pretick
 * verdict (nRF5340 network core) with the anomaly 160 rate check.
 */
bool z_arm_on_enter_cpu_idle(void)
{
	bool ok_to_sleep = true;

#if defined(CONFIG_SOC_NRF53_RTC_PRETICK) && defined(CONFIG_SOC_NRF5340_CPUNET)
	if (cpu_idle_prepare_monitor_end() == 0) {
		/* No exception happened since cpu_idle_prepare_monitor_begin.
		 * We can trust the outcome of z_arm_on_enter_cpu_idle_prepare.
		 */
		ok_to_sleep = cpu_idle_prepare_allows_sleep;
	} else {
		/* An exception happened since cpu_idle_prepare_monitor_begin.
		 * The values on which z_arm_on_enter_cpu_idle_prepare relied
		 * could have been changed by the exception, so its outcome
		 * cannot be trusted. Do not sleep at all; try again in the
		 * next iteration of the idle loop.
		 */
		ok_to_sleep = false;
	}
#endif

#if defined(CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND)
	if (ok_to_sleep) {
		ok_to_sleep = nrf53_anomaly_160_check();

#if (LOG_LEVEL >= LOG_LEVEL_DBG)
		static bool suppress_message;

		/* Log only once per burst of rejected sleep attempts. */
		if (ok_to_sleep) {
			suppress_message = false;
		} else if (!suppress_message) {
			LOG_DBG("Anomaly 160 trigger conditions detected.");
			suppress_message = true;
		}
#endif
	}
#endif /* CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND */

#if defined(CONFIG_SOC_NRF53_RTC_PRETICK) && defined(CONFIG_SOC_NRF5340_CPUNET)
	if (!ok_to_sleep) {
		/* Sleep rejected: disable the pretick IPC publish path and
		 * stop the watchdog started by the pretick chain.
		 */
		NRF_IPC->PUBLISH_RECEIVE[CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET] &=
			~IPC_PUBLISH_RECEIVE_EN_Msk;
		NRF_WDT->TASKS_STOP = 1;
	}
#endif

	return ok_to_sleep;
}
359 #endif /* CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND ||
360 * (CONFIG_SOC_NRF53_RTC_PRETICK && CONFIG_SOC_NRF5340_CPUNET)
361 */
362
363 #if CONFIG_SOC_NRF53_RTC_PRETICK
364 #ifdef CONFIG_SOC_NRF5340_CPUAPP
/* RTC pretick - application core part.
 * Connects, through an allocated (D)PPI channel, the IPC event received from
 * the network core to the IPC send task back toward the network core, so the
 * response is generated entirely in hardware with no CPU involvement.
 * Returns 0 on success or -ENOMEM when no (D)PPI channel is available.
 */
static int rtc_pretick_cpuapp_init(void)
{
	uint8_t ch;
	nrfx_err_t err;
	nrf_ipc_event_t ipc_event =
		nrf_ipc_receive_event_get(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET);
	nrf_ipc_task_t ipc_task =
		nrf_ipc_send_task_get(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET);
	uint32_t task_ipc = nrf_ipc_task_address_get(NRF_IPC, ipc_task);
	uint32_t evt_ipc = nrf_ipc_event_address_get(NRF_IPC, ipc_event);

	err = nrfx_gppi_channel_alloc(&ch);
	if (err != NRFX_SUCCESS) {
		return -ENOMEM;
	}

	/* Bind each IPC channel to its corresponding IPC signal bit. */
	nrf_ipc_receive_config_set(NRF_IPC, CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET,
				   BIT(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET));
	nrf_ipc_send_config_set(NRF_IPC, CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET,
				BIT(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET));

	/* IPC receive event (from net) -> IPC send task (to net). */
	nrfx_gppi_task_endpoint_setup(ch, task_ipc);
	nrfx_gppi_event_endpoint_setup(ch, evt_ipc);
	nrfx_gppi_channels_enable(BIT(ch));

	return 0;
}
393 #else /* CONFIG_SOC_NRF5340_CPUNET */
394
/* Hook invoked from the RTC0 interrupt handler: tear down the pretick that
 * preceded this interrupt.
 */
void rtc_pretick_rtc0_isr_hook(void)
{
	rtc_pretick_finish_previous();
}
399
/* Hook invoked from the RTC1 interrupt handler: tear down the pretick that
 * preceded this interrupt.
 */
void rtc_pretick_rtc1_isr_hook(void)
{
	rtc_pretick_finish_previous();
}
404
/* RTC pretick - network core part.
 * Wires, through one allocated (D)PPI channel, the chain:
 * RTC1 pretick compare event -> IPC send task (to app core) and
 * IPC receive event (echoed back) -> WDT START task,
 * and configures the watchdog so software may stop it.
 * Returns 0 on success or -ENOMEM when no (D)PPI channel is available.
 */
static int rtc_pretick_cpunet_init(void)
{
	uint8_t ppi_ch;
	nrf_ipc_task_t ipc_task =
		nrf_ipc_send_task_get(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET);
	nrf_ipc_event_t ipc_event =
		nrf_ipc_receive_event_get(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET);
	uint32_t task_ipc = nrf_ipc_task_address_get(NRF_IPC, ipc_task);
	uint32_t evt_ipc = nrf_ipc_event_address_get(NRF_IPC, ipc_event);
	uint32_t task_wdt = nrf_wdt_task_address_get(NRF_WDT, NRF_WDT_TASK_START);
	uint32_t evt_cc = nrf_rtc_event_address_get(NRF_RTC1,
				NRF_RTC_CHANNEL_EVENT_ADDR(RTC1_PRETICK_CC_CHAN));

	/* Configure Watchdog to allow stopping. */
	nrf_wdt_behaviour_set(NRF_WDT, WDT_CONFIG_STOPEN_Msk | BIT(4));
	/* NOTE(review): raw write to an address with no HAL symbol -
	 * presumably an undocumented enable required for the WDT stop
	 * feature; confirm against nRF5340 errata / HAL sources.
	 */
	*((volatile uint32_t *)0x41203120) = 0x14;

	/* Configure IPC */
	nrf_ipc_receive_config_set(NRF_IPC, CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET,
				   BIT(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET));
	nrf_ipc_send_config_set(NRF_IPC, CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET,
				BIT(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET));

	/* Allocate PPI channel for RTC Compare event publishers that starts WDT. */
	nrfx_err_t err = nrfx_gppi_channel_alloc(&ppi_ch);

	if (err != NRFX_SUCCESS) {
		return -ENOMEM;
	}

	/* One channel carries both event->task pairs. */
	nrfx_gppi_event_endpoint_setup(ppi_ch, evt_cc);
	nrfx_gppi_task_endpoint_setup(ppi_ch, task_ipc);
	nrfx_gppi_event_endpoint_setup(ppi_ch, evt_ipc);
	nrfx_gppi_task_endpoint_setup(ppi_ch, task_wdt);
	nrfx_gppi_channels_enable(BIT(ppi_ch));

	/* Enable routing of the pretick compare event and start clean. */
	nrf_rtc_event_enable(NRF_RTC1, NRF_RTC_CHANNEL_INT_MASK(RTC1_PRETICK_CC_CHAN));
	nrf_rtc_event_clear(NRF_RTC1, NRF_RTC_CHANNEL_EVENT_ADDR(RTC1_PRETICK_CC_CHAN));

	return 0;
}
446 #endif /* CONFIG_SOC_NRF5340_CPUNET */
447
/* Dispatch RTC pretick initialization to the core-specific routine.
 * Returns 0 on success or a negative errno code.
 */
static int rtc_pretick_init(void)
{
#ifdef CONFIG_SOC_NRF5340_CPUAPP
	return rtc_pretick_cpuapp_init();
#else
	return rtc_pretick_cpunet_init();
#endif
}
456 #endif /* CONFIG_SOC_NRF53_RTC_PRETICK */
457
458
/* SoC-level initialization for the nRF53 family: caches, LFXO/HFXO load
 * capacitors, DC/DC regulators and GPIO forwarding to the network core.
 * Registered with SYS_INIT at PRE_KERNEL_1. Always returns 0.
 */
static int nordicsemi_nrf53_init(void)
{
#if defined(CONFIG_SOC_NRF5340_CPUAPP) && defined(CONFIG_NRF_ENABLE_CACHE)
#if !defined(CONFIG_BUILD_WITH_TFM)
	/* Enable the instruction & data cache.
	 * This can only be done from secure code.
	 * This is handled by the TF-M platform so we skip it when TF-M is
	 * enabled.
	 */
	nrf_cache_enable(NRF_CACHE);
#endif
#elif defined(CONFIG_SOC_NRF5340_CPUNET) && defined(CONFIG_NRF_ENABLE_CACHE)
	nrf_nvmc_icache_config_set(NRF_NVMC, NRF_NVMC_ICACHE_ENABLE);
#endif

#if defined(CONFIG_SOC_ENABLE_LFXO)
	/* Select the internal load capacitance for the 32.768 kHz crystal,
	 * or external capacitors when no internal option is configured.
	 */
	nrf_oscillators_lfxo_cap_set(NRF_OSCILLATORS,
		IS_ENABLED(CONFIG_SOC_LFXO_CAP_INT_6PF) ?
			NRF_OSCILLATORS_LFXO_CAP_6PF :
		IS_ENABLED(CONFIG_SOC_LFXO_CAP_INT_7PF) ?
			NRF_OSCILLATORS_LFXO_CAP_7PF :
		IS_ENABLED(CONFIG_SOC_LFXO_CAP_INT_9PF) ?
			NRF_OSCILLATORS_LFXO_CAP_9PF :
			NRF_OSCILLATORS_LFXO_CAP_EXTERNAL);
#if !defined(CONFIG_BUILD_WITH_TFM)
	/* This can only be done from secure code.
	 * This is handled by the TF-M platform so we skip it when TF-M is
	 * enabled.
	 */
	nrf_gpio_pin_control_select(PIN_XL1, NRF_GPIO_PIN_SEL_PERIPHERAL);
	nrf_gpio_pin_control_select(PIN_XL2, NRF_GPIO_PIN_SEL_PERIPHERAL);
#endif /* !defined(CONFIG_BUILD_WITH_TFM) */
#endif /* defined(CONFIG_SOC_ENABLE_LFXO) */
#if defined(CONFIG_SOC_HFXO_CAP_INTERNAL)
	/* This register is only accessible from secure code. */
	uint32_t xosc32mtrim = soc_secure_read_xosc32mtrim();
	/* The SLOPE field is in the two's complement form, hence this special
	 * handling. Ideally, it would result in just one SBFX instruction for
	 * extracting the slope value, at least gcc is capable of producing such
	 * output, but since the compiler apparently tries first to optimize
	 * additions and subtractions, it generates slightly less than optimal
	 * code.
	 */
	uint32_t slope_field = (xosc32mtrim & FICR_XOSC32MTRIM_SLOPE_Msk)
			       >> FICR_XOSC32MTRIM_SLOPE_Pos;
	uint32_t slope_mask = FICR_XOSC32MTRIM_SLOPE_Msk
			      >> FICR_XOSC32MTRIM_SLOPE_Pos;
	/* Bit that carries the sign of the two's complement SLOPE field. */
	uint32_t slope_sign = (slope_mask - (slope_mask >> 1));
	/* Sign-extend: XOR with the sign bit then subtract it. */
	int32_t slope = (int32_t)(slope_field ^ slope_sign) - (int32_t)slope_sign;
	uint32_t offset = (xosc32mtrim & FICR_XOSC32MTRIM_OFFSET_Msk)
			  >> FICR_XOSC32MTRIM_OFFSET_Pos;
	/* As specified in the nRF5340 PS:
	 * CAPVALUE = (((FICR->XOSC32MTRIM.SLOPE+56)*(CAPACITANCE*2-14))
	 *            +((FICR->XOSC32MTRIM.OFFSET-8)<<4)+32)>>6;
	 * where CAPACITANCE is the desired capacitor value in pF, holding any
	 * value between 7.0 pF and 20.0 pF in 0.5 pF steps.
	 */
	uint32_t capvalue =
		((slope + 56) * (CONFIG_SOC_HFXO_CAP_INT_VALUE_X2 - 14)
		 + ((offset - 8) << 4) + 32) >> 6;

	nrf_oscillators_hfxo_cap_set(NRF_OSCILLATORS, true, capvalue);
#elif defined(CONFIG_SOC_HFXO_CAP_EXTERNAL)
	nrf_oscillators_hfxo_cap_set(NRF_OSCILLATORS, false, 0);
#endif

	/* Enable the requested DC/DC regulators. */
#if defined(CONFIG_SOC_DCDC_NRF53X_APP)
	nrf_regulators_vreg_enable_set(NRF_REGULATORS, NRF_REGULATORS_VREG_MAIN, true);
#endif
#if defined(CONFIG_SOC_DCDC_NRF53X_NET)
	nrf_regulators_vreg_enable_set(NRF_REGULATORS, NRF_REGULATORS_VREG_RADIO, true);
#endif
#if defined(CONFIG_SOC_DCDC_NRF53X_HV)
	nrf_regulators_vreg_enable_set(NRF_REGULATORS, NRF_REGULATORS_VREG_HIGH, true);
#endif

#if defined(CONFIG_SOC_NRF_GPIO_FORWARDER_FOR_NRF5340)
	/* Hand over all devicetree-listed forwarded pins to the network core. */
	static const uint8_t forwarded_psels[] = {
		DT_FOREACH_STATUS_OKAY(nordic_nrf_gpio_forwarder, ALL_GPIOS_IN_FORWARDER)
	};

	for (int i = 0; i < ARRAY_SIZE(forwarded_psels); i++) {
		soc_secure_gpio_pin_mcu_select(forwarded_psels[i], NRF_GPIO_PIN_SEL_NETWORK);
	}

#endif

	return 0;
}
548
/* Busy-wait for @p time_us microseconds using the nrfx core-dependent
 * delay loop (no timer peripheral involved).
 */
void arch_busy_wait(uint32_t time_us)
{
	nrfx_coredep_delay_us(time_us);
}
553
/* Base SoC setup must complete before kernel services start. */
SYS_INIT(nordicsemi_nrf53_init, PRE_KERNEL_1, 0);

#ifdef CONFIG_SOC_NRF53_RTC_PRETICK
/* RTC pretick workaround setup; runs after the kernel is up. */
SYS_INIT(rtc_pretick_init, POST_KERNEL, 0);
#endif
559