/*
 * Copyright (c) 2021 Andes Technology Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/arch/riscv/csr.h>

#ifndef CONFIG_ASSERT
#define LOG_LEVEL CONFIG_LOG_DEFAULT_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(pma_init, LOG_LEVEL);
#endif

/* Programmable PMA mechanism is supported */
#define MMSC_CFG_PPMA BIT(30)

/*
 * PMA Configuration (PMACFG) bitfields
 */

/* ETYPE: Entry address matching mode */
#define PMACFG_ETYPE_MASK BIT_MASK(2)
#define PMACFG_ETYPE_OFF 0
#define PMACFG_ETYPE_TOR 1
#define PMACFG_ETYPE_NA4 2
#define PMACFG_ETYPE_NAPOT 3

/* MTYPE: Memory type attribute */
#define PMACFG_MTYPE_MASK (0xF << 2)
/* non-cacheable attributes (bufferable or not) */
#define PMACFG_MTYPE_MEMORY_NOCACHE_BUFFERABLE (3 << 2)
/* cacheable attributes (write-through/back, no/read/write/RW-allocate) */
#define PMACFG_MTYPE_MEMORY_WBCACHE_RWALLOC (11 << 2)

/* pmaaddr is at 4-byte granularity in each mode */
#define TO_PMA_ADDR(addr) ((addr) >> 2)

/* The base address must be aligned to the region size */
#define NAPOT_BASE(start, size) TO_PMA_ADDR((start) & ~((size) - 1))
/* The size is encoded as 0b01...1 (the leading bit of the size bitmask is cleared) */
#define NAPOT_SIZE(size) TO_PMA_ADDR(((size) - 1) >> 1)

#define NA4_ENCODING(start) TO_PMA_ADDR(start)
#define NAPOT_ENCODING(start, size) (NAPOT_BASE(start, size) \
				     | NAPOT_SIZE(size))
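
/*
 * Worked example (illustrative values): for a NAPOT region with
 * start = 0x90000000 and size = 0x10000,
 *   NAPOT_ENCODING(0x90000000, 0x10000)
 *     = (0x90000000 >> 2) | ((0x10000 - 1) >> 3)
 *     = 0x24000000 | 0x1FFF = 0x24001FFF
 * which is the PMP-style "naturally aligned power-of-two" encoding at
 * 4-byte granularity.
 */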

#ifdef CONFIG_64BIT
/* In riscv64, the pmacfg CSR numbers are even (0, 2, ...) */
# define PMACFG_NUM(index) ((index / RV_REGSIZE) * 2)
#else
# define PMACFG_NUM(index) (index / RV_REGSIZE)
#endif
#define PMACFG_SHIFT(index) ((index % RV_REGSIZE) * 8)
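
/*
 * Illustrative mapping for entry_index 5:
 *   RV32 (RV_REGSIZE == 4): PMACFG_NUM(5) == 1, PMACFG_SHIFT(5) == 8
 *     -> byte 1 of pmacfg1
 *   RV64 (RV_REGSIZE == 8): PMACFG_NUM(5) == 0, PMACFG_SHIFT(5) == 40
 *     -> byte 5 of pmacfg0
 */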

struct pma_region_attr {
	/* Attributes belonging to pmacfg{i} */
	uint8_t pmacfg;
};

struct pma_region {
	unsigned long start;
	unsigned long size;
	struct pma_region_attr attr;
};

/*
 * Write value to CSR pmaaddr{i}
 *
 * The CSR number must be a compile-time constant, so the FOR_EACH()
 * macro expands one switch case per pmaaddr CSR.
 */
static void write_pmaaddr_csr(const uint32_t index, unsigned long value)
{
#define SWITCH_CASE_PMAADDR_WRITE(x) \
	case (x): \
		csr_write(NDS_PMAADDR##x, value); break;

	switch (index) {
	FOR_EACH(SWITCH_CASE_PMAADDR_WRITE, (;), 0, 1, 2, 3, 4, 5, 6, 7,
		 8, 9, 10, 11, 12, 13, 14, 15);
	}
}

/*
 * Write value to the pma{i}cfg entry, which is packed into CSR pmacfg{j}
 */
static void write_pmacfg_entry(const uint32_t entry_index, uint8_t entry_value)
{
	/* 1-byte pma{i}cfg entries are packed into XLEN-bit CSRs pmacfg{j} */
	uint32_t index = PMACFG_NUM(entry_index);
	uint8_t shift = PMACFG_SHIFT(entry_index);
	unsigned long pmacfg = 0;

#define SWITCH_CASE_PMACFG_READ(x) \
	case (x): \
		pmacfg = csr_read(NDS_PMACFG##x); break;

	switch (index) {
	FOR_EACH(SWITCH_CASE_PMACFG_READ, (;), 0, 1, 2, 3);
	}

	/* clear the old value in the pmacfg entry */
	pmacfg &= ~(0xFFUL << shift);
	/* set the new pmacfg entry value */
	pmacfg |= (unsigned long)entry_value << shift;

#define SWITCH_CASE_PMACFG_WRITE(x) \
	case (x): \
		csr_write(NDS_PMACFG##x, pmacfg); break;

	switch (index) {
	FOR_EACH(SWITCH_CASE_PMACFG_WRITE, (;), 0, 1, 2, 3);
	}
}
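
/*
 * Illustrative example: on RV32 (RV_REGSIZE == 4), entry_index 3 maps to
 * pmacfg0 with shift == 24, so the read-modify-write above only replaces
 * bits [31:24] of pmacfg0 and leaves entries 0..2 unchanged.
 */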

/*
 * This internal function performs PMA region initialization.
 *
 * Note:
 * The caller must provide a valid region index.
 */
static void region_init(const uint32_t index,
			const struct pma_region *region_conf)
{
	unsigned long pmaaddr;
	uint8_t pmacfg;

	if (region_conf->size == 4) {
		/* A 4-byte region cannot be encoded as NAPOT; use NA4 */
		pmaaddr = NA4_ENCODING(region_conf->start);
		pmacfg = region_conf->attr.pmacfg | PMACFG_ETYPE_NA4;
	} else {
		pmaaddr = NAPOT_ENCODING(region_conf->start, region_conf->size);
		pmacfg = region_conf->attr.pmacfg | PMACFG_ETYPE_NAPOT;
	}

	write_pmaaddr_csr(index, pmaaddr);
	write_pmacfg_entry(index, pmacfg);
}
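
/*
 * Illustrative result: for a non-cacheable bufferable NAPOT region, the
 * pmacfg entry value is PMACFG_MTYPE_MEMORY_NOCACHE_BUFFERABLE |
 * PMACFG_ETYPE_NAPOT == (3 << 2) | 3 == 0xF, and pmaaddr{index} receives
 * the NAPOT_ENCODING() of the region start and size.
 */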

/*
 * This internal function performs a run-time sanity check of the
 * PMA region start address and size.
 */
static int pma_region_is_valid(const struct pma_region *region)
{
	/* Region size must be greater than or equal to the minimum PMA region size */
	if (region->size < CONFIG_SOC_ANDES_V5_PMA_REGION_MIN_ALIGN_AND_SIZE) {
		return -EINVAL;
	}

	/* Region size must be a power of two */
	if (region->size & (region->size - 1)) {
		return -EINVAL;
	}

	/* The region start address must be aligned to the region size */
	if (region->start & (region->size - 1)) {
		return -EINVAL;
	}

	return 0;
}
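
/*
 * Illustrative usage (hypothetical address and size), mirroring what
 * configure_nocache_region() does below:
 *
 *	const struct pma_region dma_region = {
 *		.start = 0x90000000,
 *		.size = 0x10000,
 *		.attr = {PMACFG_MTYPE_MEMORY_NOCACHE_BUFFERABLE},
 *	};
 *
 *	if (pma_region_is_valid(&dma_region) == 0) {
 *		region_init(1, &dma_region);
 *	}
 */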

#ifdef CONFIG_NOCACHE_MEMORY
static void configure_nocache_region(void)
{
	const struct pma_region nocache_region = {
		.start = (unsigned long)&_nocache_ram_start,
		.size = (unsigned long)&_nocache_ram_size,
		.attr = {PMACFG_MTYPE_MEMORY_NOCACHE_BUFFERABLE},
	};

	if (pma_region_is_valid(&nocache_region)) {
		/* Skip PMA configuration if nocache region size is 0 */
		if (nocache_region.size != 0) {
			__ASSERT(0, "Configuring PMA region of nocache region "
				    "failed\n");
		}
	} else {
		/* Initialize nocache region at PMA region 0 */
		region_init(0, &nocache_region);
	}
}
#endif /* CONFIG_NOCACHE_MEMORY */

/*
 * @brief Init PMA CSRs of each CPU core
 *
 * In SMP, each CPU has its own PMA CSRs, and they only affect that CPU.
 * The CSRs of all CPUs must be configured so that a memory attribute
 * (e.g. uncacheable) takes effect on every CPU.
 */
void pma_init_per_core(void)
{
#ifdef CONFIG_NOCACHE_MEMORY
	configure_nocache_region();
#endif /* CONFIG_NOCACHE_MEMORY */
}

static int pma_init(void)
{
	unsigned long mmsc_cfg;

	mmsc_cfg = csr_read(NDS_MMSC_CFG);

	if (!(mmsc_cfg & MMSC_CFG_PPMA)) {
		/* This CPU doesn't support PMA */

		__ASSERT(0, "CPU doesn't support PMA. "
			    "Please disable CONFIG_SOC_ANDES_V5_PMA\n");
#ifndef CONFIG_ASSERT
		LOG_ERR("CPU doesn't support PMA. "
			"Please disable CONFIG_SOC_ANDES_V5_PMA");
#endif
		return -ENODEV;
	}

	pma_init_per_core();

	return 0;
}

SYS_INIT(pma_init, PRE_KERNEL_2, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);