1 /*
2 * Copyright (c) 2019 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 /**
8 * @file
9 * @brief System/hardware module for Nordic Semiconductor nRF53 family processor
10 *
11 * This module provides routines to initialize and support board-level hardware
12 * for the Nordic Semiconductor nRF53 family processor.
13 */
14
15 #include <zephyr/kernel.h>
16 #include <zephyr/init.h>
17 #include <zephyr/sys/barrier.h>
18 #include <soc/nrfx_coredep.h>
19 #include <zephyr/logging/log.h>
20 #include <nrf_erratas.h>
21 #include <hal/nrf_power.h>
22 #include <hal/nrf_ipc.h>
23 #include <helpers/nrfx_gppi.h>
24 #if defined(CONFIG_SOC_NRF5340_CPUAPP)
25 #include <zephyr/drivers/gpio.h>
26 #include <zephyr/devicetree.h>
27 #include <hal/nrf_cache.h>
28 #include <hal/nrf_gpio.h>
29 #include <hal/nrf_oscillators.h>
30 #include <hal/nrf_regulators.h>
31 #elif defined(CONFIG_SOC_NRF5340_CPUNET)
32 #include <hal/nrf_nvmc.h>
33 #endif
34 #if defined(CONFIG_PM_S2RAM)
35 #include <hal/nrf_vmc.h>
36 #endif
37 #include <hal/nrf_wdt.h>
38 #include <hal/nrf_rtc.h>
39 #include <soc_secure.h>
40
41 #include <cmsis_core.h>
42
43 #define PIN_XL1 0
44 #define PIN_XL2 1
45
46 #define RTC1_PRETICK_CC_CHAN (RTC1_CC_NUM - 1)
47
48 /* Mask of CC channels capable of generating interrupts, see nrf_rtc_timer.c */
49 #define RTC1_PRETICK_SELECTED_CC_MASK BIT_MASK(CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT + 1U)
50 #define RTC0_PRETICK_SELECTED_CC_MASK BIT_MASK(NRF_RTC_CC_COUNT_MAX)
51
52 #if defined(CONFIG_SOC_NRF_GPIO_FORWARDER_FOR_NRF5340)
53 #define GPIOS_PSEL_BY_IDX(node_id, prop, idx) \
54 NRF_DT_GPIOS_TO_PSEL_BY_IDX(node_id, prop, idx),
55 #define ALL_GPIOS_IN_NODE(node_id) \
56 DT_FOREACH_PROP_ELEM(node_id, gpios, GPIOS_PSEL_BY_IDX)
57 #define ALL_GPIOS_IN_FORWARDER(node_id) \
58 DT_FOREACH_CHILD(node_id, ALL_GPIOS_IN_NODE)
59 #endif
60
61 #define LOG_LEVEL CONFIG_SOC_LOG_LEVEL
62 LOG_MODULE_REGISTER(soc);
63
64 #if defined(CONFIG_PM_S2RAM)
65
66 #if defined(CONFIG_SOC_NRF5340_CPUAPP)
67 #define RAM_N_BLOCK (8)
68 #elif defined(CONFIG_SOC_NRF5340_CPUNET)
69 #define RAM_N_BLOCK (4)
70 #endif /* CONFIG_SOC_NRF5340_CPUAPP || CONFIG_SOC_NRF5340_CPUNET */
71
72 #define MASK_ALL_SECT (VMC_RAM_POWER_S0RETENTION_Msk | VMC_RAM_POWER_S1RETENTION_Msk | \
73 VMC_RAM_POWER_S2RETENTION_Msk | VMC_RAM_POWER_S3RETENTION_Msk | \
74 VMC_RAM_POWER_S4RETENTION_Msk | VMC_RAM_POWER_S5RETENTION_Msk | \
75 VMC_RAM_POWER_S6RETENTION_Msk | VMC_RAM_POWER_S7RETENTION_Msk | \
76 VMC_RAM_POWER_S8RETENTION_Msk | VMC_RAM_POWER_S9RETENTION_Msk | \
77 VMC_RAM_POWER_S10RETENTION_Msk | VMC_RAM_POWER_S11RETENTION_Msk | \
78 VMC_RAM_POWER_S12RETENTION_Msk | VMC_RAM_POWER_S13RETENTION_Msk | \
79 VMC_RAM_POWER_S14RETENTION_Msk | VMC_RAM_POWER_S15RETENTION_Msk)
80
enable_ram_retention(void)81 static void enable_ram_retention(void)
82 {
83 /*
84 * Enable RAM retention for *ALL* the SRAM
85 */
86 for (size_t n = 0; n < RAM_N_BLOCK; n++) {
87 nrf_vmc_ram_block_retention_set(NRF_VMC, n, MASK_ALL_SECT);
88 }
89
90 }
91 #endif /* CONFIG_PM_S2RAM */
92
93 #if defined(CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND)
94 /* This code prevents the CPU from entering sleep again if it already
95 * entered sleep 5 times within last 200 us.
96 */
nrf53_anomaly_160_check(void)97 static bool nrf53_anomaly_160_check(void)
98 {
99 /* System clock cycles needed to cover 200 us window. */
100 const uint32_t window_cycles =
101 DIV_ROUND_UP(200 * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC,
102 1000000);
103 static uint32_t timestamps[5];
104 static bool timestamps_filled;
105 static uint8_t current;
106 uint8_t oldest = (current + 1) % ARRAY_SIZE(timestamps);
107 uint32_t now = k_cycle_get_32();
108
109 if (timestamps_filled &&
110 /* + 1 because only fully elapsed cycles need to be counted. */
111 (now - timestamps[oldest]) < (window_cycles + 1)) {
112 return false;
113 }
114
115 /* Check if the CPU actually entered sleep since the last visit here
116 * (WFE/WFI could return immediately if the wake-up event was already
117 * registered).
118 */
119 if (nrf_power_event_check(NRF_POWER, NRF_POWER_EVENT_SLEEPENTER)) {
120 nrf_power_event_clear(NRF_POWER, NRF_POWER_EVENT_SLEEPENTER);
121 /* If so, update the index at which the current timestamp is
122 * to be stored so that it replaces the oldest one, otherwise
123 * (when the CPU did not sleep), the recently stored timestamp
124 * is updated.
125 */
126 current = oldest;
127 if (current == 0) {
128 timestamps_filled = true;
129 }
130 }
131
132 timestamps[current] = k_cycle_get_32();
133
134 return true;
135 }
136 #endif /* CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND */
137
138 #if defined(CONFIG_SOC_NRF53_RTC_PRETICK) && defined(CONFIG_SOC_NRF5340_CPUNET)
139
/* The pre-tick workaround owns the watchdog peripheral on cpunet; refuse to
 * build if the application also enables the nrfx watchdog driver.
 */
BUILD_ASSERT(!IS_ENABLED(CONFIG_WDT_NRFX),
	     "For CONFIG_SOC_NRF53_RTC_PRETICK watchdog is used internally for the pre-tick workaround on nRF5340 cpunet. Application cannot use the watchdog.");
142
/* Modular subtraction (a - b) within the RTC counter's 24-bit domain. */
static inline uint32_t rtc_counter_sub(uint32_t a, uint32_t b)
{
	uint32_t diff = a - b;

	return diff & NRF_RTC_COUNTER_MAX;
}
147
rtc_ticks_to_next_event_get(NRF_RTC_Type * rtc,uint32_t selected_cc_mask,uint32_t cntr,uint32_t * ticks_to_next_event)148 static bool rtc_ticks_to_next_event_get(NRF_RTC_Type *rtc, uint32_t selected_cc_mask, uint32_t cntr,
149 uint32_t *ticks_to_next_event)
150 {
151 bool result = false;
152
153 /* Let's preload register to speed-up. */
154 uint32_t reg_intenset = rtc->INTENSET;
155
156 /* Note: TICK event not handled. */
157
158 if (reg_intenset & NRF_RTC_INT_OVERFLOW_MASK) {
159 /* Overflow can generate an interrupt. */
160 *ticks_to_next_event = NRF_RTC_COUNTER_MAX + 1U - cntr;
161 result = true;
162 }
163
164 for (uint32_t chan = 0; chan < NRF_RTC_CC_COUNT_MAX; chan++) {
165 if ((selected_cc_mask & (1U << chan)) &&
166 (reg_intenset & NRF_RTC_CHANNEL_INT_MASK(chan))) {
167 /* The CC is in selected mask and is can generate an interrupt. */
168 uint32_t cc = nrf_rtc_cc_get(rtc, chan);
169 uint32_t ticks_to_fire = rtc_counter_sub(cc, cntr);
170
171 if (ticks_to_fire == 0U) {
172 /* When ticks_to_fire == 0, the event should have been just
173 * generated the interrupt can be already handled or be pending.
174 * However the next event is expected to be after counter wraps.
175 */
176 ticks_to_fire = NRF_RTC_COUNTER_MAX + 1U;
177 }
178
179 if (!result) {
180 *ticks_to_next_event = ticks_to_fire;
181 result = true;
182 } else if (ticks_to_fire < *ticks_to_next_event) {
183 *ticks_to_next_event = ticks_to_fire;
184 result = true;
185 } else {
186 /* CC that fires no earlier than already found. */
187 }
188 }
189 }
190
191 return result;
192 }
193
/**
 * @brief Read two RTC counters as a consistent pair.
 *
 * Re-samples until rtc_a's counter is stable across the pair of reads,
 * which guarantees both samples were taken within the same rtc_a tick.
 */
static void rtc_counter_synchronized_get(NRF_RTC_Type *rtc_a, NRF_RTC_Type *rtc_b,
					 uint32_t *counter_a, uint32_t *counter_b)
{
	bool stable = false;

	while (!stable) {
		*counter_a = nrf_rtc_counter_get(rtc_a);
		barrier_dmem_fence_full();
		*counter_b = nrf_rtc_counter_get(rtc_b);
		barrier_dmem_fence_full();
		stable = (*counter_a == nrf_rtc_counter_get(rtc_a));
	}
}
204
205 static uint8_t cpu_idle_prepare_monitor_dummy;
206 static bool cpu_idle_prepare_allows_sleep;
207
/* Open the Cortex-M exclusive (local) monitor on a dummy byte. Any exception
 * taken between this call and cpu_idle_prepare_monitor_end() clears the
 * monitor, which makes the STREXB in the end() call fail.
 */
static void cpu_idle_prepare_monitor_begin(void)
{
	__LDREXB(&cpu_idle_prepare_monitor_dummy);
}
212
213 /* Returns 0 if no exception preempted since the last call to cpu_idle_prepare_monitor_begin. */
/* Returns 0 if no exception preempted since the last call to cpu_idle_prepare_monitor_begin. */
static bool cpu_idle_prepare_monitor_end(void)
{
	/* The value stored is irrelevant. If any exception took place after
	 * cpu_idle_prepare_monitor_begin, the local monitor is cleared and
	 * the store fails returning 1.
	 * See Arm v8-M Architecture Reference Manual:
	 * Chapter B9.2 The local monitors
	 * Chapter B9.4 Exclusive access instructions and the monitors
	 * See Arm Cortex-M33 Processor Technical Reference Manual
	 * Chapter 3.5 Exclusive monitor
	 */
	return __STREXB(0U, &cpu_idle_prepare_monitor_dummy);
}
227
/* Tear down the previous pretick cycle: disable publishing of the IPC
 * receive event (breaking the PPI chain that starts the watchdog) and clear
 * the pretick CC event on RTC1 so a stale event cannot retrigger it.
 */
static void rtc_pretick_finish_previous(void)
{
	NRF_IPC->PUBLISH_RECEIVE[CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET] &=
							~IPC_PUBLISH_RECEIVE_EN_Msk;

	nrf_rtc_event_clear(NRF_RTC1, NRF_RTC_CHANNEL_EVENT_ADDR(RTC1_PRETICK_CC_CHAN));
}
235
236
/* Idle-entry preparation for the RTC pretick workaround (cpunet).
 *
 * Programs the dedicated RTC1 CC channel to fire one tick before the earliest
 * interrupt-capable RTC0/RTC1 event and arms the IPC publish path wired (via
 * PPI, see rtc_pretick_cpunet_init) to start the watchdog on that pretick.
 * The sleep verdict is stored in cpu_idle_prepare_allows_sleep and consumed
 * by z_arm_on_enter_cpu_idle(), guarded by the exclusive-monitor bracket.
 */
void z_arm_on_enter_cpu_idle_prepare(void)
{
	bool ok_to_sleep = true;

	/* Open the exclusive monitor so z_arm_on_enter_cpu_idle() can detect
	 * whether an exception preempted the logic below.
	 */
	cpu_idle_prepare_monitor_begin();

	uint32_t rtc_counter = 0U;
	uint32_t rtc_ticks_to_next_event = 0U;
	uint32_t rtc0_counter = 0U;
	uint32_t rtc0_ticks_to_next_event = 0U;

	/* Sample both RTC counters as a consistent pair. */
	rtc_counter_synchronized_get(NRF_RTC1, NRF_RTC0, &rtc_counter, &rtc0_counter);

	bool rtc_scheduled = rtc_ticks_to_next_event_get(NRF_RTC1, RTC1_PRETICK_SELECTED_CC_MASK,
							 rtc_counter, &rtc_ticks_to_next_event);

	if (rtc_ticks_to_next_event_get(NRF_RTC0, RTC0_PRETICK_SELECTED_CC_MASK, rtc0_counter,
					&rtc0_ticks_to_next_event)) {
		/* An event is scheduled on RTC0. */
		if (!rtc_scheduled) {
			rtc_ticks_to_next_event = rtc0_ticks_to_next_event;
			rtc_scheduled = true;
		} else if (rtc0_ticks_to_next_event < rtc_ticks_to_next_event) {
			rtc_ticks_to_next_event = rtc0_ticks_to_next_event;
		} else {
			/* Event on RTC0 will not happen earlier than already found. */
		}
	}

	if (rtc_scheduled) {
		static bool rtc_pretick_cc_set_on_time;
		/* The pretick should happen 1 tick before the earliest scheduled event
		 * that can trigger an interrupt.
		 */
		uint32_t rtc_pretick_cc_val = (rtc_counter + rtc_ticks_to_next_event - 1U)
					      & NRF_RTC_COUNTER_MAX;

		if (rtc_pretick_cc_val != nrf_rtc_cc_get(NRF_RTC1, RTC1_PRETICK_CC_CHAN)) {
			/* The CC for pretick needs to be updated. */
			rtc_pretick_finish_previous();
			nrf_rtc_cc_set(NRF_RTC1, RTC1_PRETICK_CC_CHAN, rtc_pretick_cc_val);

			if (rtc_ticks_to_next_event >= NRF_RTC_COUNTER_MAX/2) {
				/* Pretick is scheduled so far in the future, assumed on time. */
				rtc_pretick_cc_set_on_time = true;
			} else {
				/* Check whether the CC was written early enough
				 * for the compare to take effect before COUNTER
				 * reaches it.
				 */
				barrier_dmem_fence_full();
				rtc_counter = nrf_rtc_counter_get(NRF_RTC1);
				uint32_t pretick_cc_to_counter =
					rtc_counter_sub(rtc_pretick_cc_val, rtc_counter);

				if ((pretick_cc_to_counter < 3) ||
				    (pretick_cc_to_counter >= NRF_RTC_COUNTER_MAX/2)) {
					/* The COUNTER value is close enough to the expected
					 * pretick CC or has just expired, so the pretick event
					 * generation is not guaranteed.
					 */
					rtc_pretick_cc_set_on_time = false;
				} else {
					/* The written rtc_pretick_cc is guaranteed to trigger
					 * a compare event.
					 */
					rtc_pretick_cc_set_on_time = true;
				}
			}
		} else {
			/* The CC for pretick doesn't need to be updated, however
			 * rtc_pretick_cc_set_on_time still holds if we managed to set it on time.
			 */
		}

		/* If the CC for pretick is set on time, the pretick CC event can be reliably
		 * generated, so allow sleep. Otherwise (the CC for pretick cannot be reliably
		 * generated, because CC was set very close to its fire time) do not sleep at all.
		 */
		ok_to_sleep = rtc_pretick_cc_set_on_time;
	} else {
		/* No events on any RTC timers are scheduled. */
	}

	if (ok_to_sleep) {
		/* Re-arm the IPC publish path so the pretick event restarts the WDT. */
		NRF_IPC->PUBLISH_RECEIVE[CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET] |=
			IPC_PUBLISH_RECEIVE_EN_Msk;
		if (!nrf_rtc_event_check(NRF_RTC1,
					 NRF_RTC_CHANNEL_EVENT_ADDR(RTC1_PRETICK_CC_CHAN))) {
			NRF_WDT->TASKS_STOP = 1;
			/* Check if any event did not occur after we checked for
			 * stopping condition. If yes, we might have stopped WDT
			 * when it should be running. Restart it.
			 */
			if (nrf_rtc_event_check(NRF_RTC1,
					NRF_RTC_CHANNEL_EVENT_ADDR(RTC1_PRETICK_CC_CHAN))) {
				NRF_WDT->TASKS_START = 1;
			}
		}
	}

	cpu_idle_prepare_allows_sleep = ok_to_sleep;
}
339 #endif /* CONFIG_SOC_NRF53_RTC_PRETICK && CONFIG_SOC_NRF5340_CPUNET */
340
341 #if defined(CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND) || \
342 (defined(CONFIG_SOC_NRF53_RTC_PRETICK) && defined(CONFIG_SOC_NRF5340_CPUNET))
/* Final sleep gate, invoked right before WFE/WFI.
 *
 * Combines the pretick verdict from z_arm_on_enter_cpu_idle_prepare()
 * (valid only if no exception intervened, as told by the exclusive
 * monitor) with the anomaly 160 rate-limit check.
 *
 * @return true if the CPU may enter sleep, false to skip this idle entry.
 */
bool z_arm_on_enter_cpu_idle(void)
{
	bool ok_to_sleep = true;

#if defined(CONFIG_SOC_NRF53_RTC_PRETICK) && defined(CONFIG_SOC_NRF5340_CPUNET)
	if (cpu_idle_prepare_monitor_end() == 0) {
		/* No exception happened since cpu_idle_prepare_monitor_begin.
		 * We can trust the outcome of z_arm_on_enter_cpu_idle_prepare.
		 */
		ok_to_sleep = cpu_idle_prepare_allows_sleep;
	} else {
		/* Exception happened since cpu_idle_prepare_monitor_begin.
		 * The state set by z_arm_on_enter_cpu_idle_prepare could have
		 * been changed by the exception, so its outcome cannot be
		 * trusted. Do not sleep at all; retry in the next iteration of
		 * the idle loop.
		 */
		ok_to_sleep = false;
	}
#endif

#if defined(CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND)
	if (ok_to_sleep) {
		ok_to_sleep = nrf53_anomaly_160_check();

#if (LOG_LEVEL >= LOG_LEVEL_DBG)
		/* Log only the first denial of a burst to avoid log flooding. */
		static bool suppress_message;

		if (ok_to_sleep) {
			suppress_message = false;
		} else if (!suppress_message) {
			LOG_DBG("Anomaly 160 trigger conditions detected.");
			suppress_message = true;
		}
#endif
	}
#endif /* CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND */

#if defined(CONFIG_SOC_NRF53_RTC_PRETICK) && defined(CONFIG_SOC_NRF5340_CPUNET)
	if (!ok_to_sleep) {
		/* Sleep denied: disarm the pretick IPC path and stop the WDT. */
		NRF_IPC->PUBLISH_RECEIVE[CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET] &=
			~IPC_PUBLISH_RECEIVE_EN_Msk;
		NRF_WDT->TASKS_STOP = 1;
	}
#endif

	return ok_to_sleep;
}
390 #endif /* CONFIG_SOC_NRF53_ANOMALY_160_WORKAROUND ||
391 * (CONFIG_SOC_NRF53_RTC_PRETICK && CONFIG_SOC_NRF5340_CPUNET)
392 */
393
394 #if CONFIG_SOC_NRF53_RTC_PRETICK
395 #ifdef CONFIG_SOC_NRF5340_CPUAPP
396 /* RTC pretick - application core part. */
rtc_pretick_cpuapp_init(void)397 static int rtc_pretick_cpuapp_init(void)
398 {
399 uint8_t ch;
400 nrfx_err_t err;
401 nrf_ipc_event_t ipc_event =
402 nrf_ipc_receive_event_get(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET);
403 nrf_ipc_task_t ipc_task =
404 nrf_ipc_send_task_get(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET);
405 uint32_t task_ipc = nrf_ipc_task_address_get(NRF_IPC, ipc_task);
406 uint32_t evt_ipc = nrf_ipc_event_address_get(NRF_IPC, ipc_event);
407
408 err = nrfx_gppi_channel_alloc(&ch);
409 if (err != NRFX_SUCCESS) {
410 return -ENOMEM;
411 }
412
413 nrf_ipc_receive_config_set(NRF_IPC, CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET,
414 BIT(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET));
415 nrf_ipc_send_config_set(NRF_IPC, CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET,
416 BIT(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET));
417
418 nrfx_gppi_task_endpoint_setup(ch, task_ipc);
419 nrfx_gppi_event_endpoint_setup(ch, evt_ipc);
420 nrfx_gppi_channels_enable(BIT(ch));
421
422 return 0;
423 }
424 #else /* CONFIG_SOC_NRF5340_CPUNET */
425
/* RTC0 ISR hook: the serviced interrupt ends the current pretick cycle. */
void rtc_pretick_rtc0_isr_hook(void)
{
	rtc_pretick_finish_previous();
}
430
/* RTC1 ISR hook: the serviced interrupt ends the current pretick cycle. */
void rtc_pretick_rtc1_isr_hook(void)
{
	rtc_pretick_finish_previous();
}
435
/* RTC pretick - network core part.
 *
 * Builds the hardware chain: RTC1 pretick CC event -> IPC send task (to the
 * app core, which loops it back, see rtc_pretick_cpuapp_init) -> IPC receive
 * event -> WDT START task, all through one (D)PPI channel with fork, so the
 * watchdog is restarted by the pretick without CPU involvement.
 *
 * @return 0 on success, -ENOMEM if no (D)PPI channel is available.
 */
static int rtc_pretick_cpunet_init(void)
{
	uint8_t ppi_ch;
	nrf_ipc_task_t ipc_task =
		nrf_ipc_send_task_get(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET);
	nrf_ipc_event_t ipc_event =
		nrf_ipc_receive_event_get(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET);
	uint32_t task_ipc = nrf_ipc_task_address_get(NRF_IPC, ipc_task);
	uint32_t evt_ipc = nrf_ipc_event_address_get(NRF_IPC, ipc_event);
	uint32_t task_wdt = nrf_wdt_task_address_get(NRF_WDT, NRF_WDT_TASK_START);
	uint32_t evt_cc = nrf_rtc_event_address_get(NRF_RTC1,
				NRF_RTC_CHANNEL_EVENT_ADDR(RTC1_PRETICK_CC_CHAN));

	/* Configure Watchdog to allow stopping.
	 * NOTE(review): BIT(4) and the raw write to 0x41203120 below are
	 * undocumented in the public register map — presumably part of the
	 * vendor workaround enabling WDT stop; verify against Nordic errata.
	 */
	nrf_wdt_behaviour_set(NRF_WDT, WDT_CONFIG_STOPEN_Msk | BIT(4));
	*((volatile uint32_t *)0x41203120) = 0x14;

	/* Configure IPC */
	nrf_ipc_receive_config_set(NRF_IPC, CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET,
				   BIT(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_TO_NET));
	nrf_ipc_send_config_set(NRF_IPC, CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET,
				BIT(CONFIG_SOC_NRF53_RTC_PRETICK_IPC_CH_FROM_NET));

	/* Allocate PPI channel for RTC Compare event publishers that starts WDT. */
	nrfx_err_t err = nrfx_gppi_channel_alloc(&ppi_ch);

	if (err != NRFX_SUCCESS) {
		return -ENOMEM;
	}

	/* One channel with two event/task pairs: CC -> IPC send, and
	 * IPC receive -> WDT start.
	 */
	nrfx_gppi_event_endpoint_setup(ppi_ch, evt_cc);
	nrfx_gppi_task_endpoint_setup(ppi_ch, task_ipc);
	nrfx_gppi_event_endpoint_setup(ppi_ch, evt_ipc);
	nrfx_gppi_task_endpoint_setup(ppi_ch, task_wdt);
	nrfx_gppi_channels_enable(BIT(ppi_ch));

	/* Route the pretick CC event and start from a clean event state. */
	nrf_rtc_event_enable(NRF_RTC1, NRF_RTC_CHANNEL_INT_MASK(RTC1_PRETICK_CC_CHAN));
	nrf_rtc_event_clear(NRF_RTC1, NRF_RTC_CHANNEL_EVENT_ADDR(RTC1_PRETICK_CC_CHAN));

	return 0;
}
477 #endif /* CONFIG_SOC_NRF5340_CPUNET */
478
/* Dispatch pretick initialization to the core-specific routine. */
static int rtc_pretick_init(void)
{
#ifdef CONFIG_SOC_NRF5340_CPUAPP
	return rtc_pretick_cpuapp_init();
#else
	return rtc_pretick_cpunet_init();
#endif
}
487 #endif /* CONFIG_SOC_NRF53_RTC_PRETICK */
488
489
/* Early SoC initialization: caches, LFXO/HFXO configuration, DC/DC
 * regulators, GPIO forwarding to the network core, and RAM retention.
 * Registered via SYS_INIT at PRE_KERNEL_1.
 *
 * @return 0 always (individual steps have no failure paths here).
 */
static int nordicsemi_nrf53_init(void)
{
#if defined(CONFIG_SOC_NRF5340_CPUAPP) && defined(CONFIG_NRF_ENABLE_CACHE)
#if !defined(CONFIG_BUILD_WITH_TFM)
	/* Enable the instruction & data cache.
	 * This can only be done from secure code.
	 * This is handled by the TF-M platform so we skip it when TF-M is
	 * enabled.
	 */
	nrf_cache_enable(NRF_CACHE);
#endif
#elif defined(CONFIG_SOC_NRF5340_CPUNET) && defined(CONFIG_NRF_ENABLE_CACHE)
	/* Network core has only an instruction cache in NVMC. */
	nrf_nvmc_icache_config_set(NRF_NVMC, NRF_NVMC_ICACHE_ENABLE);
#endif

#if defined(CONFIG_SOC_ENABLE_LFXO)
	/* Select the internal load capacitance per Kconfig, or external caps. */
	nrf_oscillators_lfxo_cap_set(NRF_OSCILLATORS,
		IS_ENABLED(CONFIG_SOC_LFXO_CAP_INT_6PF) ?
			NRF_OSCILLATORS_LFXO_CAP_6PF :
		IS_ENABLED(CONFIG_SOC_LFXO_CAP_INT_7PF) ?
			NRF_OSCILLATORS_LFXO_CAP_7PF :
		IS_ENABLED(CONFIG_SOC_LFXO_CAP_INT_9PF) ?
			NRF_OSCILLATORS_LFXO_CAP_9PF :
			NRF_OSCILLATORS_LFXO_CAP_EXTERNAL);
#if !defined(CONFIG_BUILD_WITH_TFM)
	/* This can only be done from secure code.
	 * This is handled by the TF-M platform so we skip it when TF-M is
	 * enabled.
	 */
	nrf_gpio_pin_control_select(PIN_XL1, NRF_GPIO_PIN_SEL_PERIPHERAL);
	nrf_gpio_pin_control_select(PIN_XL2, NRF_GPIO_PIN_SEL_PERIPHERAL);
#endif /* !defined(CONFIG_BUILD_WITH_TFM) */
#endif /* defined(CONFIG_SOC_ENABLE_LFXO) */
#if defined(CONFIG_SOC_HFXO_CAP_INTERNAL)
	/* This register is only accessible from secure code. */
	uint32_t xosc32mtrim = soc_secure_read_xosc32mtrim();
	/* The SLOPE field is in the two's complement form, hence this special
	 * handling. Ideally, it would result in just one SBFX instruction for
	 * extracting the slope value, at least gcc is capable of producing such
	 * output, but since the compiler apparently tries first to optimize
	 * additions and subtractions, it generates slightly less than optimal
	 * code.
	 */
	uint32_t slope_field = (xosc32mtrim & FICR_XOSC32MTRIM_SLOPE_Msk)
			       >> FICR_XOSC32MTRIM_SLOPE_Pos;
	uint32_t slope_mask = FICR_XOSC32MTRIM_SLOPE_Msk
			      >> FICR_XOSC32MTRIM_SLOPE_Pos;
	/* slope_sign is the sign bit of the field; XOR-and-subtract performs
	 * sign extension of the two's complement field.
	 */
	uint32_t slope_sign = (slope_mask - (slope_mask >> 1));
	int32_t slope = (int32_t)(slope_field ^ slope_sign) - (int32_t)slope_sign;
	uint32_t offset = (xosc32mtrim & FICR_XOSC32MTRIM_OFFSET_Msk)
			  >> FICR_XOSC32MTRIM_OFFSET_Pos;
	/* As specified in the nRF5340 PS:
	 * CAPVALUE = (((FICR->XOSC32MTRIM.SLOPE+56)*(CAPACITANCE*2-14))
	 *            +((FICR->XOSC32MTRIM.OFFSET-8)<<4)+32)>>6;
	 * where CAPACITANCE is the desired capacitor value in pF, holding any
	 * value between 7.0 pF and 20.0 pF in 0.5 pF steps.
	 */
	uint32_t capvalue =
		((slope + 56) * (CONFIG_SOC_HFXO_CAP_INT_VALUE_X2 - 14)
		 + ((offset - 8) << 4) + 32) >> 6;

	nrf_oscillators_hfxo_cap_set(NRF_OSCILLATORS, true, capvalue);
#elif defined(CONFIG_SOC_HFXO_CAP_EXTERNAL)
	nrf_oscillators_hfxo_cap_set(NRF_OSCILLATORS, false, 0);
#endif

	/* Enable DC/DC converters as selected in Kconfig. */
#if defined(CONFIG_SOC_DCDC_NRF53X_APP)
	nrf_regulators_dcdcen_set(NRF_REGULATORS, true);
#endif
#if defined(CONFIG_SOC_DCDC_NRF53X_NET)
	nrf_regulators_dcdcen_radio_set(NRF_REGULATORS, true);
#endif
#if defined(CONFIG_SOC_DCDC_NRF53X_HV)
	nrf_regulators_dcdcen_vddh_set(NRF_REGULATORS, true);
#endif

#if defined(CONFIG_SOC_NRF_GPIO_FORWARDER_FOR_NRF5340)
	/* Hand the devicetree-listed pins over to the network core. */
	static const uint8_t forwarded_psels[] = {
		DT_FOREACH_STATUS_OKAY(nordic_nrf_gpio_forwarder, ALL_GPIOS_IN_FORWARDER)
	};

	for (int i = 0; i < ARRAY_SIZE(forwarded_psels); i++) {
		soc_secure_gpio_pin_mcu_select(forwarded_psels[i], NRF_GPIO_PIN_SEL_NETWORK);
	}

#endif

#if defined(CONFIG_PM_S2RAM)
	enable_ram_retention();
#endif /* CONFIG_PM_S2RAM */

	return 0;
}
583
/* Architecture busy-wait hook: spin for @p time_us microseconds using the
 * nrfx core-dependent cycle-accurate delay.
 */
void arch_busy_wait(uint32_t time_us)
{
	nrfx_coredep_delay_us(time_us);
}
588
/* Core SoC setup must run before any kernel service. */
SYS_INIT(nordicsemi_nrf53_init, PRE_KERNEL_1, 0);

#ifdef CONFIG_SOC_NRF53_RTC_PRETICK
/* Pretick wiring is registered later, at POST_KERNEL priority 0. */
SYS_INIT(rtc_pretick_init, POST_KERNEL, 0);
#endif
594