1 /*
2 * Copyright (c) 2019 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 /**
8 * @file
9 * @brief System/hardware module for Nordic Semiconductor nRF53 family processor
10 *
11 * This module provides routines to initialize and support board-level hardware
12 * for the Nordic Semiconductor nRF53 family processor.
13 */
14
15 #include <zephyr/kernel.h>
16 #include <zephyr/init.h>
17 #include <zephyr/sys/barrier.h>
18 #include <zephyr/dt-bindings/regulator/nrf5x.h>
19 #include <soc/nrfx_coredep.h>
20 #include <zephyr/logging/log.h>
21 #include <nrf_erratas.h>
22 #include <hal/nrf_power.h>
23 #include <hal/nrf_ipc.h>
24 #include <helpers/nrfx_gppi.h>
25 #if defined(CONFIG_SOC_NRF5340_CPUAPP)
26 #include <zephyr/drivers/gpio.h>
27 #include <zephyr/devicetree.h>
28 #include <hal/nrf_cache.h>
29 #include <hal/nrf_gpio.h>
30 #include <hal/nrf_oscillators.h>
31 #include <hal/nrf_regulators.h>
32 #elif defined(CONFIG_SOC_NRF5340_CPUNET)
33 #include <hal/nrf_nvmc.h>
34 #endif
35 #include <hal/nrf_wdt.h>
36 #include <hal/nrf_rtc.h>
37 #include <soc_secure.h>
38
39 #include <cmsis_core.h>
40
41 #define PIN_XL1 0
42 #define PIN_XL2 1
43
44 #define RTC1_PRETICK_CC_CHAN (RTC1_CC_NUM - 1)
45
46 /* Mask of CC channels capable of generating interrupts, see nrf_rtc_timer.c */
47 #define RTC1_PRETICK_SELECTED_CC_MASK BIT_MASK(CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT + 1U)
48 #define RTC0_PRETICK_SELECTED_CC_MASK BIT_MASK(NRF_RTC_CC_COUNT_MAX)
49
50 #if defined(CONFIG_SOC_NRF_GPIO_FORWARDER_FOR_NRF5340)
51 #define GPIOS_PSEL_BY_IDX(node_id, prop, idx) \
52 NRF_DT_GPIOS_TO_PSEL_BY_IDX(node_id, prop, idx),
53 #define ALL_GPIOS_IN_NODE(node_id) \
54 DT_FOREACH_PROP_ELEM(node_id, gpios, GPIOS_PSEL_BY_IDX)
55 #define ALL_GPIOS_IN_FORWARDER(node_id) \
56 DT_FOREACH_CHILD(node_id, ALL_GPIOS_IN_NODE)
57 #endif
58
59 #ifdef CONFIG_SOC_NRF5340_CPUAPP
60 #define LFXO_NODE DT_NODELABEL(lfxo)
61 #define HFXO_NODE DT_NODELABEL(hfxo)
62
63 /* LFXO config from DT */
64 #if DT_ENUM_HAS_VALUE(LFXO_NODE, load_capacitors, external)
65 #define LFXO_CAP NRF_OSCILLATORS_LFXO_CAP_EXTERNAL
66 #elif DT_ENUM_HAS_VALUE(LFXO_NODE, load_capacitors, internal)
67 #define LFXO_CAP (DT_ENUM_IDX(LFXO_NODE, load_capacitance_picofarad) + 1U)
68 #else
69 /* LFXO config from legacy Kconfig */
70 #if defined(CONFIG_SOC_LFXO_CAP_INT_6PF)
71 #define LFXO_CAP NRF_OSCILLATORS_LFXO_CAP_6PF
72 #elif defined(CONFIG_SOC_LFXO_CAP_INT_7PF)
73 #define LFXO_CAP NRF_OSCILLATORS_LFXO_CAP_7PF
74 #elif defined(CONFIG_SOC_LFXO_CAP_INT_9PF)
75 #define LFXO_CAP NRF_OSCILLATORS_LFXO_CAP_9PF
76 #else
77 #define LFXO_CAP NRF_OSCILLATORS_LFXO_CAP_EXTERNAL
78 #endif
79 #endif
80
81 /* HFXO config from DT */
82 #if DT_ENUM_HAS_VALUE(HFXO_NODE, load_capacitors, internal)
83 #define HFXO_CAP_VAL_X2 (DT_PROP(HFXO_NODE, load_capacitance_femtofarad)) * 2U / 1000U
84 #elif defined(CONFIG_SOC_HFXO_CAP_INTERNAL)
85 /* HFXO config from legacy Kconfig */
86 #define HFXO_CAP_VAL_X2 CONFIG_SOC_HFXO_CAP_INT_VALUE_X2
87 #endif
88 #endif /* CONFIG_SOC_NRF5340_CPUAPP */
89
90 #define LOG_LEVEL CONFIG_SOC_LOG_LEVEL
91 LOG_MODULE_REGISTER(soc);
92
93
94 #if defined(CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND)
/* This code prevents the CPU from entering sleep again if it already
 * entered sleep 5 times within last 200 us (nRF53 anomaly 160 workaround).
 * Returns false when another sleep entry must be suppressed, true otherwise.
 */
static bool nrf53_anomaly_160_check(void)
{
	/* System clock cycles needed to cover 200 us window. */
	const uint32_t window_cycles =
		DIV_ROUND_UP(200 * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC,
			     1000000);
	/* Ring buffer of timestamps (system clock cycles) of recent sleep entries. */
	static uint32_t timestamps[5];
	/* Set once the ring buffer has been filled (wrapped) at least once. */
	static bool timestamps_filled;
	/* Index of the most recently stored timestamp. */
	static uint8_t current;
	uint8_t oldest = (current + 1) % ARRAY_SIZE(timestamps);
	uint32_t now = k_cycle_get_32();

	if (timestamps_filled &&
	    /* + 1 because only fully elapsed cycles need to be counted. */
	    (now - timestamps[oldest]) < (window_cycles + 1)) {
		return false;
	}

	/* Check if the CPU actually entered sleep since the last visit here
	 * (WFE/WFI could return immediately if the wake-up event was already
	 * registered).
	 */
	if (nrf_power_event_check(NRF_POWER, NRF_POWER_EVENT_SLEEPENTER)) {
		nrf_power_event_clear(NRF_POWER, NRF_POWER_EVENT_SLEEPENTER);
		/* If so, update the index at which the current timestamp is
		 * to be stored so that it replaces the oldest one, otherwise
		 * (when the CPU did not sleep), the recently stored timestamp
		 * is updated.
		 */
		current = oldest;
		if (current == 0) {
			timestamps_filled = true;
		}
	}

	timestamps[current] = k_cycle_get_32();

	return true;
}
137 #endif /* CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND */
138
139 #if defined(CONFIG_SOC_NRF53_RTC_PRETICK) && defined(CONFIG_SOC_NRF5340_CPUNET)
140
141 BUILD_ASSERT(!IS_ENABLED(CONFIG_WDT_NRFX),
142 "For CONFIG_SOC_NRF53_RTC_PRETICK watchdog is used internally for the pre-tick workaround on nRF5340 cpunet. Application cannot use the watchdog.");
143
rtc_counter_sub(uint32_t a,uint32_t b)144 static inline uint32_t rtc_counter_sub(uint32_t a, uint32_t b)
145 {
146 return (a - b) & NRF_RTC_COUNTER_MAX;
147 }
148
/* Find how many RTC ticks remain until the earliest interrupt-capable event
 * on @p rtc, considering the overflow event and the CC channels selected by
 * @p selected_cc_mask whose interrupts are enabled.
 *
 * @param rtc                 RTC instance to inspect.
 * @param selected_cc_mask    Mask of CC channels to consider.
 * @param cntr                Current COUNTER value of @p rtc.
 * @param ticks_to_next_event Output: ticks until the earliest such event.
 *
 * @return true if at least one interrupt-capable event was found.
 */
static bool rtc_ticks_to_next_event_get(NRF_RTC_Type *rtc, uint32_t selected_cc_mask, uint32_t cntr,
					uint32_t *ticks_to_next_event)
{
	/* Read INTENSET once up front to avoid repeated register accesses. */
	uint32_t intenset = rtc->INTENSET;
	bool found = false;

	/* Note: TICK event not handled. */

	if (intenset & NRF_RTC_INT_OVERFLOW_MASK) {
		/* Overflow can generate an interrupt. */
		*ticks_to_next_event = NRF_RTC_COUNTER_MAX + 1U - cntr;
		found = true;
	}

	for (uint32_t chan = 0; chan < NRF_RTC_CC_COUNT_MAX; chan++) {
		bool selected = (selected_cc_mask & (1U << chan)) != 0U;
		bool int_enabled = (intenset & NRF_RTC_CHANNEL_INT_MASK(chan)) != 0U;

		if (!selected || !int_enabled) {
			continue;
		}

		/* The CC is in the selected mask and can generate an interrupt. */
		uint32_t ticks_to_fire = rtc_counter_sub(nrf_rtc_cc_get(rtc, chan), cntr);

		if (ticks_to_fire == 0U) {
			/* When ticks_to_fire == 0, the event should have just been
			 * generated; its interrupt may already be handled or pending.
			 * The next occurrence is expected after the counter wraps.
			 */
			ticks_to_fire = NRF_RTC_COUNTER_MAX + 1U;
		}

		/* Keep the smallest distance seen so far. */
		if (!found || (ticks_to_fire < *ticks_to_next_event)) {
			*ticks_to_next_event = ticks_to_fire;
			found = true;
		}
	}

	return found;
}
194
/* Read the COUNTER of two RTC instances as one consistent snapshot: retry
 * until rtc_a's COUNTER reads the same before and after reading rtc_b's,
 * so both returned values correspond to the same rtc_a tick. The barriers
 * keep the reads from being reordered or merged.
 */
static void rtc_counter_synchronized_get(NRF_RTC_Type *rtc_a, NRF_RTC_Type *rtc_b,
					 uint32_t *counter_a, uint32_t *counter_b)
{
	do {
		*counter_a = nrf_rtc_counter_get(rtc_a);
		barrier_dmem_fence_full();
		*counter_b = nrf_rtc_counter_get(rtc_b);
		barrier_dmem_fence_full();
	} while (*counter_a != nrf_rtc_counter_get(rtc_a));
}
205
/* Dummy byte used only as the LDREXB/STREXB target so the CPU's local
 * exclusive monitor can detect whether an exception preempted the idle
 * preparation sequence.
 */
static uint8_t cpu_idle_prepare_monitor_dummy;
/* Result computed by z_arm_on_enter_cpu_idle_prepare(): whether sleep is allowed. */
static bool cpu_idle_prepare_allows_sleep;
208
/* Arm the local exclusive monitor; any exception taken afterwards clears it,
 * which cpu_idle_prepare_monitor_end() detects.
 */
static void cpu_idle_prepare_monitor_begin(void)
{
	__LDREXB(&cpu_idle_prepare_monitor_dummy);
}
213
/* Returns 0 if no exception preempted since the last call to cpu_idle_prepare_monitor_begin. */
static bool cpu_idle_prepare_monitor_end(void)
{
	/* The value stored is irrelevant. If any exception took place after
	 * cpu_idle_prepare_monitor_begin, the local monitor is cleared and
	 * the store fails returning 1.
	 * See Arm v8-M Architecture Reference Manual:
	 * Chapter B9.2 The local monitors
	 * Chapter B9.4 Exclusive access instructions and the monitors
	 * See Arm Cortex-M33 Processor Technical Reference Manual
	 * Chapter 3.5 Exclusive monitor
	 */
	return __STREXB(0U, &cpu_idle_prepare_monitor_dummy);
}
228
/* Disarm the previous pretick: stop publishing the IPC receive event to the
 * (D)PPI and clear the pending pretick CC event on RTC1.
 */
static void rtc_pretick_finish_previous(void)
{
	NRF_IPC->PUBLISH_RECEIVE[CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET] &=
			~IPC_PUBLISH_RECEIVE_EN_Msk;

	nrf_rtc_event_clear(NRF_RTC1, NRF_RTC_CHANNEL_EVENT_ADDR(RTC1_PRETICK_CC_CHAN));
}
236
237
/* Prepare entering CPU idle with the RTC pretick workaround (network core).
 *
 * Finds the earliest interrupt-capable event scheduled on RTC0/RTC1 and
 * programs RTC1's pretick CC channel one tick before it. Sleep is allowed
 * only if that CC value is guaranteed to take effect (written far enough
 * ahead of the COUNTER). The decision is stored in
 * cpu_idle_prepare_allows_sleep and consumed by z_arm_on_enter_cpu_idle();
 * the exclusive monitor armed here detects whether an exception invalidated
 * the decision in the meantime.
 */
void z_arm_on_enter_cpu_idle_prepare(void)
{
	bool ok_to_sleep = true;

	cpu_idle_prepare_monitor_begin();

	uint32_t rtc_counter = 0U;
	uint32_t rtc_ticks_to_next_event = 0U;
	uint32_t rtc0_counter = 0U;
	uint32_t rtc0_ticks_to_next_event = 0U;

	/* Consistent snapshot of both RTC counters. */
	rtc_counter_synchronized_get(NRF_RTC1, NRF_RTC0, &rtc_counter, &rtc0_counter);

	bool rtc_scheduled = rtc_ticks_to_next_event_get(NRF_RTC1, RTC1_PRETICK_SELECTED_CC_MASK,
							 rtc_counter, &rtc_ticks_to_next_event);

	if (rtc_ticks_to_next_event_get(NRF_RTC0, RTC0_PRETICK_SELECTED_CC_MASK, rtc0_counter,
					&rtc0_ticks_to_next_event)) {
		/* An event is scheduled on RTC0. */
		if (!rtc_scheduled) {
			rtc_ticks_to_next_event = rtc0_ticks_to_next_event;
			rtc_scheduled = true;
		} else if (rtc0_ticks_to_next_event < rtc_ticks_to_next_event) {
			rtc_ticks_to_next_event = rtc0_ticks_to_next_event;
		} else {
			/* Event on RTC0 will not happen earlier than already found. */
		}
	}

	if (rtc_scheduled) {
		static bool rtc_pretick_cc_set_on_time;
		/* The pretick should happen 1 tick before the earliest scheduled event
		 * that can trigger an interrupt.
		 */
		uint32_t rtc_pretick_cc_val = (rtc_counter + rtc_ticks_to_next_event - 1U)
					      & NRF_RTC_COUNTER_MAX;

		if (rtc_pretick_cc_val != nrf_rtc_cc_get(NRF_RTC1, RTC1_PRETICK_CC_CHAN)) {
			/* The CC for pretick needs to be updated. */
			rtc_pretick_finish_previous();
			nrf_rtc_cc_set(NRF_RTC1, RTC1_PRETICK_CC_CHAN, rtc_pretick_cc_val);

			if (rtc_ticks_to_next_event >= NRF_RTC_COUNTER_MAX/2) {
				/* Pretick is scheduled so far in the future, assumed on time. */
				rtc_pretick_cc_set_on_time = true;
			} else {
				/* Let's check if we updated CC on time, so that the CC can
				 * take effect.
				 */
				barrier_dmem_fence_full();
				rtc_counter = nrf_rtc_counter_get(NRF_RTC1);
				uint32_t pretick_cc_to_counter =
					rtc_counter_sub(rtc_pretick_cc_val, rtc_counter);

				if ((pretick_cc_to_counter < 3) ||
				    (pretick_cc_to_counter >= NRF_RTC_COUNTER_MAX/2)) {
					/* The COUNTER value is close enough to the expected
					 * pretick CC or has just expired, so the pretick event
					 * generation is not guaranteed.
					 */
					rtc_pretick_cc_set_on_time = false;
				} else {
					/* The written rtc_pretick_cc is guaranteed to trigger
					 * compare event.
					 */
					rtc_pretick_cc_set_on_time = true;
				}
			}
		} else {
			/* The CC for pretick doesn't need to be updated, however
			 * rtc_pretick_cc_set_on_time still holds if we managed to set it on time.
			 */
		}

		/* If the CC for pretick is set on time, so the pretick CC event can be reliably
		 * generated then allow to sleep. Otherwise (the CC for pretick cannot be reliably
		 * generated, because CC was set very short to it's fire time) sleep not at all.
		 */
		ok_to_sleep = rtc_pretick_cc_set_on_time;
	} else {
		/* No events on any RTC timers are scheduled. */
	}

	if (ok_to_sleep) {
		/* Arm the pretick path: allow the IPC receive event to publish so
		 * the (D)PPI chain configured at init can react to the pretick CC.
		 */
		NRF_IPC->PUBLISH_RECEIVE[CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET] |=
			IPC_PUBLISH_RECEIVE_EN_Msk;
		if (!nrf_rtc_event_check(NRF_RTC1,
					 NRF_RTC_CHANNEL_EVENT_ADDR(RTC1_PRETICK_CC_CHAN))) {
			NRF_WDT->TASKS_STOP = 1;
			/* Check if any event did not occur after we checked for
			 * stopping condition. If yes, we might have stopped WDT
			 * when it should be running. Restart it.
			 */
			if (nrf_rtc_event_check(NRF_RTC1,
						NRF_RTC_CHANNEL_EVENT_ADDR(RTC1_PRETICK_CC_CHAN))) {
				NRF_WDT->TASKS_START = 1;
			}
		}
	}

	cpu_idle_prepare_allows_sleep = ok_to_sleep;
}
340 #endif /* CONFIG_SOC_NRF53_RTC_PRETICK && CONFIG_SOC_NRF5340_CPUNET */
341
342 #if defined(CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND) || \
343 (defined(CONFIG_SOC_NRF53_RTC_PRETICK) && defined(CONFIG_SOC_NRF5340_CPUNET))
/* Final gate before WFI/WFE; return false to abort entering sleep. */
bool z_arm_on_enter_cpu_idle(void)
{
	bool ok_to_sleep = true;

#if defined(CONFIG_SOC_NRF53_RTC_PRETICK) && defined(CONFIG_SOC_NRF5340_CPUNET)
	if (cpu_idle_prepare_monitor_end() == 0) {
		/* No exception happened since cpu_idle_prepare_monitor_begin.
		 * We can trust the outcome of z_arm_on_enter_cpu_idle_prepare.
		 */
		ok_to_sleep = cpu_idle_prepare_allows_sleep;
	} else {
		/* Exception happened since cpu_idle_prepare_monitor_begin.
		 * The values on which z_arm_on_enter_cpu_idle_prepare based its
		 * decision could have been changed by the exception, so its
		 * outcome cannot be trusted. Do not sleep at all; try again in
		 * the next iteration of the idle loop.
		 */
		ok_to_sleep = false;
	}
#endif

#if defined(CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND)
	if (ok_to_sleep) {
		ok_to_sleep = nrf53_anomaly_160_check();

#if (LOG_LEVEL >= LOG_LEVEL_DBG)
		/* Log only on the first suppressed sleep entry, not on every one. */
		static bool suppress_message;

		if (ok_to_sleep) {
			suppress_message = false;
		} else if (!suppress_message) {
			LOG_DBG("Anomaly 160 trigger conditions detected.");
			suppress_message = true;
		}
#endif
	}
#endif /* CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND */

#if defined(CONFIG_SOC_NRF53_RTC_PRETICK) && defined(CONFIG_SOC_NRF5340_CPUNET)
	if (!ok_to_sleep) {
		/* Sleep was aborted: disarm the pretick IPC path and stop the WDT. */
		NRF_IPC->PUBLISH_RECEIVE[CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET] &=
			~IPC_PUBLISH_RECEIVE_EN_Msk;
		NRF_WDT->TASKS_STOP = 1;
	}
#endif

	return ok_to_sleep;
}
391 #endif /* CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND ||
392 * (CONFIG_SOC_NRF53_RTC_PRETICK && CONFIG_SOC_NRF5340_CPUNET)
393 */
394
395 #if CONFIG_SOC_NRF53_RTC_PRETICK
396 #ifdef CONFIG_SOC_NRF5340_CPUAPP
397 /* RTC pretick - application core part. */
rtc_pretick_cpuapp_init(void)398 static int rtc_pretick_cpuapp_init(void)
399 {
400 uint8_t ch;
401 nrfx_err_t err;
402 nrf_ipc_event_t ipc_event =
403 nrf_ipc_receive_event_get(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET);
404 nrf_ipc_task_t ipc_task =
405 nrf_ipc_send_task_get(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET);
406 uint32_t task_ipc = nrf_ipc_task_address_get(NRF_IPC, ipc_task);
407 uint32_t evt_ipc = nrf_ipc_event_address_get(NRF_IPC, ipc_event);
408
409 err = nrfx_gppi_channel_alloc(&ch);
410 if (err != NRFX_SUCCESS) {
411 return -ENOMEM;
412 }
413
414 nrf_ipc_receive_config_set(NRF_IPC, CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET,
415 BIT(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET));
416 nrf_ipc_send_config_set(NRF_IPC, CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET,
417 BIT(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET));
418
419 nrfx_gppi_task_endpoint_setup(ch, task_ipc);
420 nrfx_gppi_event_endpoint_setup(ch, evt_ipc);
421 nrfx_gppi_channels_enable(BIT(ch));
422
423 return 0;
424 }
425 #else /* CONFIG_SOC_NRF5340_CPUNET */
426
/* Hook called from the RTC0 ISR: disarm/clear the previous pretick. */
void rtc_pretick_rtc0_isr_hook(void)
{
	rtc_pretick_finish_previous();
}
431
/* Hook called from the RTC1 ISR: disarm/clear the previous pretick. */
void rtc_pretick_rtc1_isr_hook(void)
{
	rtc_pretick_finish_previous();
}
436
/* RTC pretick - network core part.
 * Sets up a single (D)PPI channel with two event/task pairs:
 *   RTC1 pretick CC event -> IPC task (signal to app core), and
 *   IPC event (signal received back) -> WDT START task.
 * NOTE(review): the WDT start is presumably what produces the early
 * ("pretick") wake-up activity ahead of the real RTC interrupt - confirm
 * against the workaround description.
 */
static int rtc_pretick_cpunet_init(void)
{
	uint8_t ppi_ch;
	nrf_ipc_task_t ipc_task =
		nrf_ipc_send_task_get(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET);
	nrf_ipc_event_t ipc_event =
		nrf_ipc_receive_event_get(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET);
	uint32_t task_ipc = nrf_ipc_task_address_get(NRF_IPC, ipc_task);
	uint32_t evt_ipc = nrf_ipc_event_address_get(NRF_IPC, ipc_event);
	uint32_t task_wdt = nrf_wdt_task_address_get(NRF_WDT, NRF_WDT_TASK_START);
	uint32_t evt_cc = nrf_rtc_event_address_get(NRF_RTC1,
				NRF_RTC_CHANNEL_EVENT_ADDR(RTC1_PRETICK_CC_CHAN));

	/* Configure Watchdog to allow stopping. */
	nrf_wdt_behaviour_set(NRF_WDT, WDT_CONFIG_STOPEN_Msk | BIT(4));
	/* NOTE(review): undocumented raw register write (0x41203120 = 0x14);
	 * appears related to permitting the WDT stop above - verify against
	 * Nordic errata/IC documentation before changing.
	 */
	*((volatile uint32_t *)0x41203120) = 0x14;

	/* Configure IPC */
	nrf_ipc_receive_config_set(NRF_IPC, CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET,
				   BIT(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET));
	nrf_ipc_send_config_set(NRF_IPC, CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET,
				BIT(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET));

	/* Allocate PPI channel for RTC Compare event publishers that starts WDT. */
	nrfx_err_t err = nrfx_gppi_channel_alloc(&ppi_ch);

	if (err != NRFX_SUCCESS) {
		return -ENOMEM;
	}

	nrfx_gppi_event_endpoint_setup(ppi_ch, evt_cc);
	nrfx_gppi_task_endpoint_setup(ppi_ch, task_ipc);
	nrfx_gppi_event_endpoint_setup(ppi_ch, evt_ipc);
	nrfx_gppi_task_endpoint_setup(ppi_ch, task_wdt);
	nrfx_gppi_channels_enable(BIT(ppi_ch));

	nrf_rtc_event_enable(NRF_RTC1, NRF_RTC_CHANNEL_INT_MASK(RTC1_PRETICK_CC_CHAN));
	nrf_rtc_event_clear(NRF_RTC1, NRF_RTC_CHANNEL_EVENT_ADDR(RTC1_PRETICK_CC_CHAN));

	return 0;
}
478 #endif /* CONFIG_SOC_NRF5340_CPUNET */
479
/* Dispatch to the core-specific pretick setup; registered via SYS_INIT.
 * Returns 0 on success or a negative errno from the selected initializer.
 */
static int rtc_pretick_init(void)
{
#ifdef CONFIG_SOC_NRF5340_CPUAPP
	return rtc_pretick_cpuapp_init();
#else
	return rtc_pretick_cpunet_init();
#endif
}
488 #endif /* CONFIG_SOC_NRF53_RTC_PRETICK */
489
490
/* SoC-level hardware initialization for the nRF5340: caches, oscillator
 * load capacitors, regulators and (app core) GPIO forwarding to the
 * network core. Registered at PRE_KERNEL_1 via SYS_INIT.
 *
 * @return 0 (always succeeds).
 */
static int nordicsemi_nrf53_init(void)
{
#if defined(CONFIG_SOC_NRF5340_CPUAPP) && defined(CONFIG_NRF_ENABLE_CACHE)
#if !defined(CONFIG_BUILD_WITH_TFM)
	/* Enable the instruction & data cache.
	 * This can only be done from secure code.
	 * This is handled by the TF-M platform so we skip it when TF-M is
	 * enabled.
	 */
	nrf_cache_enable(NRF_CACHE);
#endif
#elif defined(CONFIG_SOC_NRF5340_CPUNET) && defined(CONFIG_NRF_ENABLE_CACHE)
	nrf_nvmc_icache_config_set(NRF_NVMC, NRF_NVMC_ICACHE_ENABLE);
#endif

#ifdef CONFIG_SOC_NRF5340_CPUAPP
#if defined(LFXO_CAP)
	nrf_oscillators_lfxo_cap_set(NRF_OSCILLATORS, LFXO_CAP);
#if !defined(CONFIG_BUILD_WITH_TFM)
	/* This can only be done from secure code.
	 * This is handled by the TF-M platform so we skip it when TF-M is
	 * enabled.
	 */
	nrf_gpio_pin_control_select(PIN_XL1, NRF_GPIO_PIN_SEL_PERIPHERAL);
	nrf_gpio_pin_control_select(PIN_XL2, NRF_GPIO_PIN_SEL_PERIPHERAL);
#endif /* !defined(CONFIG_BUILD_WITH_TFM) */
#endif /* defined(LFXO_CAP) */
#if defined(HFXO_CAP_VAL_X2)
	/* This register is only accessible from secure code. */
	uint32_t xosc32mtrim = soc_secure_read_xosc32mtrim();
	/* The SLOPE field is in the two's complement form, hence this special
	 * handling. Ideally, it would result in just one SBFX instruction for
	 * extracting the slope value, at least gcc is capable of producing such
	 * output, but since the compiler apparently tries first to optimize
	 * additions and subtractions, it generates slightly less than optimal
	 * code.
	 */
	uint32_t slope_field = (xosc32mtrim & FICR_XOSC32MTRIM_SLOPE_Msk)
			       >> FICR_XOSC32MTRIM_SLOPE_Pos;
	uint32_t slope_mask = FICR_XOSC32MTRIM_SLOPE_Msk
			      >> FICR_XOSC32MTRIM_SLOPE_Pos;
	/* slope_sign is the sign bit of the SLOPE field; the XOR/subtract pair
	 * below performs manual sign extension.
	 */
	uint32_t slope_sign = (slope_mask - (slope_mask >> 1));
	int32_t slope = (int32_t)(slope_field ^ slope_sign) - (int32_t)slope_sign;
	uint32_t offset = (xosc32mtrim & FICR_XOSC32MTRIM_OFFSET_Msk)
			  >> FICR_XOSC32MTRIM_OFFSET_Pos;
	/* As specified in the nRF5340 PS:
	 * CAPVALUE = (((FICR->XOSC32MTRIM.SLOPE+56)*(CAPACITANCE*2-14))
	 *            +((FICR->XOSC32MTRIM.OFFSET-8)<<4)+32)>>6;
	 * where CAPACITANCE is the desired capacitor value in pF, holding any
	 * value between 7.0 pF and 20.0 pF in 0.5 pF steps.
	 */
	uint32_t capvalue =
		((slope + 56) * (HFXO_CAP_VAL_X2 - 14)
		 + ((offset - 8) << 4) + 32) >> 6;

	nrf_oscillators_hfxo_cap_set(NRF_OSCILLATORS, true, capvalue);
#elif defined(CONFIG_SOC_HFXO_CAP_EXTERNAL) || \
	DT_ENUM_HAS_VALUE(HFXO_NODE, load_capacitors, external)
	/* External load capacitors: disable the internal ones. */
	nrf_oscillators_hfxo_cap_set(NRF_OSCILLATORS, false, 0);
#endif
#endif /* CONFIG_SOC_NRF5340_CPUAPP */

#if defined(CONFIG_SOC_DCDC_NRF53X_APP) || \
	(DT_PROP(DT_NODELABEL(vregmain), regulator_initial_mode) == NRF5X_REG_MODE_DCDC)
	nrf_regulators_vreg_enable_set(NRF_REGULATORS, NRF_REGULATORS_VREG_MAIN, true);
#endif
#if defined(CONFIG_SOC_DCDC_NRF53X_NET) || \
	(DT_PROP(DT_NODELABEL(vregradio), regulator_initial_mode) == NRF5X_REG_MODE_DCDC)
	nrf_regulators_vreg_enable_set(NRF_REGULATORS, NRF_REGULATORS_VREG_RADIO, true);
#endif
#if defined(CONFIG_SOC_DCDC_NRF53X_HV) || DT_NODE_HAS_STATUS_OKAY(DT_NODELABEL(vregh))
	nrf_regulators_vreg_enable_set(NRF_REGULATORS, NRF_REGULATORS_VREG_HIGH, true);
#endif

#if defined(CONFIG_SOC_NRF_GPIO_FORWARDER_FOR_NRF5340)
	/* Hand every devicetree-listed forwarded pin over to the network core. */
	static const uint8_t forwarded_psels[] = {
		DT_FOREACH_STATUS_OKAY(nordic_nrf_gpio_forwarder, ALL_GPIOS_IN_FORWARDER)
	};

	for (int i = 0; i < ARRAY_SIZE(forwarded_psels); i++) {
		soc_secure_gpio_pin_mcu_select(forwarded_psels[i], NRF_GPIO_PIN_SEL_NETWORK);
	}

#endif

	return 0;
}
578
/* SoC implementation of the architecture busy-wait: delegate to the nrfx
 * core-dependent microsecond delay loop.
 */
void arch_busy_wait(uint32_t time_us)
{
	nrfx_coredep_delay_us(time_us);
}
583
/* Basic SoC hardware setup runs before kernel services come up. */
SYS_INIT(nordicsemi_nrf53_init, PRE_KERNEL_1, 0);

#ifdef CONFIG_SOC_NRF53_RTC_PRETICK
/* Pretick IPC/(D)PPI wiring is deferred until after kernel init. */
SYS_INIT(rtc_pretick_init, POST_KERNEL, 0);
#endif
589