1 /* Copyright (c) 2021 Intel Corporation
2 * SPDX-License-Identifier: Apache-2.0
3 */
4 #ifndef __INTEL_ADSP_CPU_INIT_H
5 #define __INTEL_ADSP_CPU_INIT_H
6
7 #include <zephyr/arch/arch_inlines.h>
8 #include <zephyr/arch/xtensa/arch.h>
9 #include <xtensa/config/core-isa.h>
10 #include <xtensa/corebits.h>
11 #include <adsp_memory.h>
12
/* MEMCTL image used to turn the caches on: invalidate-on-enable plus
 * all icache/dcache "way use/allocate" bits, and the LOOP-instruction
 * fetch buffer enable (see the cache-enable write in cpu_early_init()).
 */
#define MEMCTL_VALUE (MEMCTL_INV_EN | MEMCTL_ICWU_MASK | MEMCTL_DCWA_MASK | \
		      MEMCTL_DCWU_MASK | MEMCTL_L0IBUF_EN)

#define ATOMCTL_BY_RCW BIT(0) /* RCW Transaction for Bypass Memory */
#define ATOMCTL_WT_RCW BIT(2) /* RCW Transaction for Writethrough Cacheable Memory */
#define ATOMCTL_WB_RCW BIT(4) /* RCW Transaction for Writeback Cacheable Memory */
#define ATOMCTL_VALUE (ATOMCTL_BY_RCW | ATOMCTL_WT_RCW | ATOMCTL_WB_RCW)
20
21 /* Low-level CPU initialization. Call this immediately after entering
22 * C code to initialize the cache, protection and synchronization
23 * features.
24 */
static ALWAYS_INLINE void cpu_early_init(void)
{
	uint32_t reg;

#ifdef CONFIG_ADSP_NEED_POWER_ON_CACHE
	/* First, we need to power the cache SRAM banks on! Write a bit
	 * for each cache way in the bottom half of the L1CCFG register
	 * and poll the top half for them to turn on.
	 *
	 * The way counts come from the capability register fields
	 * (ADSP_CxL1CCAP_DCMWC/ICMWC); dcache ways occupy bits [7:0],
	 * icache ways bits [15:8] of the request half.
	 */
	uint32_t dmask = BIT(ADSP_CxL1CCAP_DCMWC) - 1;
	uint32_t imask = BIT(ADSP_CxL1CCAP_ICMWC) - 1;
	uint32_t waymask = (imask << 8) | dmask;

	ADSP_CxL1CCFG_REG = waymask;
	/* Busy-wait: the status bits (upper 16) mirror the request bits
	 * once the corresponding banks are powered.
	 */
	while (((ADSP_CxL1CCFG_REG >> 16) & waymask) != waymask) {
	}

	/* Prefetcher also power gates, same interface: request in the
	 * low half, poll the matching status bit (bit 16) in the top half.
	 */
	ADSP_CxL1PCFG_REG = 1;
	while ((ADSP_CxL1PCFG_REG & 0x10000) == 0) {
	}
#endif

	/* Now set up the Xtensa CPU to enable the cache logic. The
	 * details of the fields are somewhat complicated, but per the
	 * ISA ref: "Turning on caches at power-up usually consists of
	 * writing a constant with bits[31:8] all 1's to MEMCTL.".
	 * Also set bit 0 to enable the LOOP extension instruction
	 * fetch buffer. The rsync makes the write take effect before
	 * any subsequent memory access.
	 */
#if XCHAL_USE_MEMCTL
	reg = MEMCTL_VALUE;
	XTENSA_WSR("MEMCTL", reg);
	__asm__ volatile("rsync");
#endif

	/* Start with a known-zero THREADPTR user register; its reset
	 * value is not guaranteed by hardware.
	 */
#if XCHAL_HAVE_THREADPTR
	reg = 0;
	XTENSA_WUR("THREADPTR", reg);
#endif

	/* Likewise enable prefetching. Sadly these values are not
	 * architecturally defined by Xtensa (they're just documented
	 * as priority hints), so this constant is just copied from
	 * SOF for now. If we care about prefetch priority tuning
	 * we're supposed to ask Cadence I guess.
	 */
	reg = ADSP_L1_CACHE_PREFCTL_VALUE;
	XTENSA_WSR("PREFCTL", reg);
	__asm__ volatile("rsync");

	/* Finally we need to enable the cache in the Region
	 * Protection Option "TLB" entries. The hardware defaults
	 * have this set to RW/uncached everywhere.
	 *
	 * If we have MMU enabled, we don't need to do this right now.
	 * Let use the default configuration and properly configure the
	 * MMU when running from RAM.
	 */
#ifndef CONFIG_MMU
	ARCH_XTENSA_SET_RPO_TLB();
#endif


	/* Initialize ATOMCTL: Hardware defaults for S32C1I use
	 * "internal" operations, meaning they are atomic only WRT the
	 * local CPU! We need external transactions on the shared
	 * bus.
	 */
	reg = ATOMCTL_VALUE;
	XTENSA_WSR("ATOMCTL", reg);

	/* Initialize interrupts to "disabled" */
	reg = 0;
	XTENSA_WSR("INTENABLE", reg);

	/* Finally VECBASE. Note that on core 0 startup, we're still
	 * running in IMR and the vectors at this address won't be
	 * copied into HP-SRAM until later. That's OK, as interrupts
	 * are still disabled at this stage and will remain so
	 * consistently until Zephyr switches into the main thread.
	 */
	reg = VECBASE_RESET_PADDR_SRAM;
	XTENSA_WSR("VECBASE", reg);
}
110
111 #endif /* __INTEL_ADSP_CPU_INIT_H */
112