1 /*
2  * Copyright (c) 2021 Andes Technology Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include "soc_v5.h"
8 
9 #include <zephyr/init.h>
10 #include <zephyr/kernel.h>
11 #include <zephyr/arch/cpu.h>
12 #include <zephyr/linker/linker-defs.h>
13 #include <zephyr/arch/riscv/csr.h>
14 
15 #ifndef CONFIG_ASSERT
16 #define LOG_LEVEL CONFIG_LOG_DEFAULT_LEVEL
17 #include <zephyr/logging/log.h>
18 LOG_MODULE_REGISTER(pma_init, LOG_LEVEL);
19 #endif
20 
21 /* Programmable PMA mechanism is supported */
22 #define MMSC_CFG_PPMA		BIT(30)
23 
24 /*
25  * PMA Configuration (PMACFG) bitfields
26  */
27 
28 /* ETYPE: Entry address matching mode */
29 #define PMACFG_ETYPE_MASK	BIT_MASK(2)
30 #define PMACFG_ETYPE_OFF	0
31 #define PMACFG_ETYPE_TOR	1
32 #define PMACFG_ETYPE_NA4	2
33 #define PMACFG_ETYPE_NAPOT	3
34 
35 /* MTYPE: Memory type attribute */
36 #define PMACFG_MTYPE_MASK			(0xF << 2)
37 /* non-cacheable attributes (bufferable or not) */
38 #define PMACFG_MTYPE_MEMORY_NOCACHE_BUFFERABLE	(3 << 2)
39 /* cacheable attributes (write-through/back, no/read/write/RW-allocate) */
40 #define PMACFG_MTYPE_MEMORY_WBCACHE_RWALLOC	(11 << 2)
41 
42 /* pmaaddr is 4-byte granularity in each mode */
43 #define TO_PMA_ADDR(addr)		((addr) >> 2)
44 
45 /* The base address is aligned to size */
46 #define NAPOT_BASE(start, size)		TO_PMA_ADDR((start) & ~((size) - 1))
47 /* The encoding of size is 0b01...1, (change the leading bit of bitmask to 0) */
48 #define NAPOT_SIZE(size)		TO_PMA_ADDR(((size) - 1) >> 1)
49 
50 #define NA4_ENCODING(start)		TO_PMA_ADDR(start)
51 #define NAPOT_ENCODING(start, size)	(NAPOT_BASE(start, size) \
52 					 | NAPOT_SIZE(size))
53 
54 #ifdef CONFIG_64BIT
/* In riscv64, CSR pmacfg numbers are even (0, 2, ...) */
56 # define PMACFG_NUM(index)		((index / RV_REGSIZE) * 2)
57 #else
58 # define PMACFG_NUM(index)		(index / RV_REGSIZE)
59 #endif
60 #define PMACFG_SHIFT(index)		((index % RV_REGSIZE) * 8)
61 
62 struct pma_region_attr {
63 	/* Attributes belonging to pmacfg{i} */
64 	uint8_t pmacfg;
65 };
66 
67 struct pma_region {
68 	unsigned long start;
69 	unsigned long size;
70 	struct pma_region_attr attr;
71 };
72 
73 /*
74  * Write value to CSRs pmaaddr{i}
75  */
write_pmaaddr_csr(const uint32_t index,unsigned long value)76 static void write_pmaaddr_csr(const uint32_t index, unsigned long value)
77 {
78 	#define SWITCH_CASE_PMAADDR_WRITE(x)		\
79 		case (x):				\
80 		csr_write(NDS_PMAADDR##x,  value); break;
81 
82 	switch (index) {
83 	FOR_EACH(SWITCH_CASE_PMAADDR_WRITE, (;), 0, 1, 2, 3, 4, 5, 6, 7,
84 	8, 9, 10, 11, 12, 13, 14, 15);
85 	}
86 }
87 
/*
 * Write value to the pma{i}cfg entry; entries are packed into CSRs pmacfg{j}
 */
write_pmacfg_entry(const uint32_t entry_index,uint8_t entry_value)91 static void write_pmacfg_entry(const uint32_t entry_index, uint8_t entry_value)
92 {
93 	/* 1-byte pma{i}cfg entries are packed into XLEN-byte CSRs pmacfg{j} */
94 	uint32_t index = PMACFG_NUM(entry_index);
95 	uint8_t shift = PMACFG_SHIFT(entry_index);
96 	unsigned long pmacfg = 0;
97 
98 	#define SWITCH_CASE_PMACFG_READ(x)		\
99 		case (x):				\
100 		pmacfg = csr_read(NDS_PMACFG##x); break;
101 
102 	switch (index) {
103 	FOR_EACH(SWITCH_CASE_PMACFG_READ, (;), 0, 1, 2, 3);
104 	}
105 
106 	/* clear old value in pmacfg entry */
107 	pmacfg &= ~(0xFF << shift);
108 	/* set new value to pmacfg entry value */
109 	pmacfg |= entry_value << shift;
110 
111 	#define SWITCH_CASE_PMACFG_WRITE(x)		\
112 		case (x):				\
113 		csr_write(NDS_PMACFG##x, pmacfg); break;
114 
115 	switch (index) {
116 	FOR_EACH(SWITCH_CASE_PMACFG_WRITE, (;), 0, 1, 2, 3);
117 	}
118 }
119 
120 /*
121  * This internal function performs PMA region initialization.
122  *
123  * Note:
124  *   The caller must provide a valid region index.
125  */
region_init(const uint32_t index,const struct pma_region * region_conf)126 static void region_init(const uint32_t index,
127 	const struct pma_region *region_conf)
128 {
129 	unsigned long pmaaddr;
130 	uint8_t pmacfg;
131 
132 	if (region_conf->size == 4) {
133 		pmaaddr = NA4_ENCODING(region_conf->start);
134 		pmacfg = region_conf->attr.pmacfg | PMACFG_ETYPE_NA4;
135 	} else {
136 		pmaaddr = NAPOT_ENCODING(region_conf->start, region_conf->size);
137 		pmacfg = region_conf->attr.pmacfg | PMACFG_ETYPE_NAPOT;
138 	}
139 
140 	write_pmaaddr_csr(index, pmaaddr);
141 	write_pmacfg_entry(index, pmacfg);
142 }
143 
144 /*
145  * This internal function performs run-time sanity check for
146  * PMA region start address and size.
147  */
pma_region_is_valid(const struct pma_region * region)148 static int pma_region_is_valid(const struct pma_region *region)
149 {
150 	/* Region size must greater or equal to the minimum PMA region size */
151 	if (region->size < CONFIG_SOC_ANDES_V5_PMA_REGION_MIN_ALIGN_AND_SIZE) {
152 		return -EINVAL;
153 	}
154 
155 	/* Region size must be power-of-two */
156 	if (region->size & (region->size - 1)) {
157 		return -EINVAL;
158 	}
159 
160 	/* Start address of the region must align with size */
161 	if (region->start & (region->size - 1)) {
162 		return -EINVAL;
163 	}
164 
165 	return 0;
166 }
167 
168 #ifdef CONFIG_NOCACHE_MEMORY
configure_nocache_region(void)169 static void configure_nocache_region(void)
170 {
171 	const struct pma_region nocache_region = {
172 		.start = (unsigned long)&_nocache_ram_start,
173 		.size = (unsigned long)&_nocache_ram_size,
174 		.attr = {PMACFG_MTYPE_MEMORY_NOCACHE_BUFFERABLE},
175 	};
176 
177 	if (pma_region_is_valid(&nocache_region)) {
178 		/* Skip PMA configuration if nocache region size is 0 */
179 		if (nocache_region.size != 0) {
180 			__ASSERT(0, "Configuring PMA region of nocache region "
181 				    "failed\n");
182 		}
183 	} else {
184 		/* Initialize nocache region at PMA region 0 */
185 		region_init(0, &nocache_region);
186 	}
187 }
188 #endif /* CONFIG_NOCACHE_MEMORY */
189 
190 /*
191  * @brief Init PMA CSRs of each CPU core
192  *
 * In SMP, each CPU has its own PMA CSRs, and each PMA CSR only affects one CPU.
194  * We should configure CSRs of all CPUs to make memory attribute
195  * (e.g. uncacheable) affects all CPUs.
196  */
void pma_init_per_core(void)
{
#ifdef CONFIG_NOCACHE_MEMORY
	/* Mark the linker-provided nocache RAM section uncacheable on this core */
	configure_nocache_region();
#endif /* CONFIG_NOCACHE_MEMORY */
}
203 
pma_init(void)204 static int pma_init(void)
205 {
206 	unsigned long mmsc_cfg;
207 
208 	mmsc_cfg = csr_read(NDS_MMSC_CFG);
209 
210 	if (!(mmsc_cfg & MMSC_CFG_PPMA)) {
211 		/* This CPU doesn't support PMA */
212 
213 		__ASSERT(0, "CPU doesn't support PMA. "
214 			    "Please disable CONFIG_SOC_ANDES_V5_PMA\n");
215 #ifndef CONFIG_ASSERT
216 		LOG_ERR("CPU doesn't support PMA. "
217 			"Please disable CONFIG_SOC_ANDES_V5_PMA");
218 #endif
219 		return -ENODEV;
220 	}
221 
222 	pma_init_per_core();
223 
224 	return 0;
225 }
226 
227 SYS_INIT(pma_init, PRE_KERNEL_2, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
228