/*
 * Copyright (c) 2021 Andes Technology Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <init.h>
#include <kernel.h>
#include <arch/cpu.h>
#include <linker/linker-defs.h>

/* Programmable PMA mechanism is supported */
#define MMSC_CFG_PPMA		(1 << 30)

/*
 * PMA Configuration (PMACFG) bitfields
 */

/* ETYPE: Entry address matching mode */
#define PMACFG_ETYPE_MASK	3
#define PMACFG_ETYPE_OFF	0
#define PMACFG_ETYPE_TOR	1
#define PMACFG_ETYPE_NA4	2
#define PMACFG_ETYPE_NAPOT	3
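/* This driver uses NA4 for an exact 4-byte region and NAPOT for a naturally
 * aligned power-of-two region whose size is encoded in the low bits of
 * pmaaddr (see the encoding macros below).
 */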

/* MTYPE: Memory type attribute */
#define PMACFG_MTYPE_MASK			(0xF << 2)
/* non-cacheable attributes (bufferable or not) */
#define PMACFG_MTYPE_MEMORY_NOCACHE_BUFFERABLE	(3 << 2)
/* cacheable attributes (write-through/back, no/read/write/RW-allocate) */
#define PMACFG_MTYPE_MEMORY_WBCACHE_RWALLOC	(11 << 2)

/* pmaaddr has 4-byte granularity in each address matching mode */
#define TO_PMA_ADDR(addr)		((addr) >> 2)

/* The base address is aligned to the region size */
#define NAPOT_BASE(start, size)		TO_PMA_ADDR((start) & ~((size) - 1))
/* The size is encoded as 0b01...1
 * (the leading bit of the size bitmask is changed to 0)
 */
#define NAPOT_SIZE(size)		TO_PMA_ADDR(((size) - 1) >> 1)

#define NA4_ENCODING(start)		TO_PMA_ADDR(start)
#define NAPOT_ENCODING(start, size)	(NAPOT_BASE(start, size) \
					 | NAPOT_SIZE(size))
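
/*
 * Worked example (illustrative values): a 64 KiB (0x10000) region based at
 * 0x80000000 gives NAPOT_BASE = TO_PMA_ADDR(0x80000000) = 0x20000000 and
 * NAPOT_SIZE = TO_PMA_ADDR(0xFFFF >> 1) = 0x1FFF, so the pmaaddr encoding
 * is 0x20000000 | 0x1FFF = 0x20001FFF (base bits followed by the 0b01...1
 * size pattern).
 */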

#ifdef CONFIG_64BIT
/* In riscv64, the pmacfg CSR numbers are even (0, 2, ...) */
# define PMACFG_NUM(index)		(((index) / RV_REGSIZE) * 2)
#else
# define PMACFG_NUM(index)		((index) / RV_REGSIZE)
#endif
#define PMACFG_SHIFT(index)		(((index) % RV_REGSIZE) * 8)
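/*
 * Example mapping (illustrative): with RV_REGSIZE = 4 (riscv32), entry 5
 * is packed into pmacfg1 at bit offset 8; with RV_REGSIZE = 8 (riscv64),
 * the same entry is packed into pmacfg0 at bit offset 40, since only the
 * even-numbered pmacfg CSRs are used on riscv64.
 */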

/* Wrappers of inline assembly */
#define read_csr(var, csr) \
	__asm__ volatile ("csrr %0, %1" : "=r" (var) : "i" (csr))
#define write_csr(csr, val) \
	__asm__ volatile ("csrw %0, %1" :: "i" (csr), "r" (val))
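/*
 * Usage sketch: "read_csr(val, NDS_PMACFG0)" reads pmacfg0 into val and
 * "write_csr(NDS_PMAADDR0, val)" writes val to pmaaddr0. The CSR number is
 * emitted as an immediate operand, so it must be a compile-time constant;
 * this is why the helpers below switch over the region index instead of
 * computing the CSR number at run time.
 */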

struct pma_region_attr {
	/* Attributes belonging to pmacfg{i} */
	uint8_t pmacfg;
};

struct pma_region {
	ulong_t start;
	ulong_t size;
	struct pma_region_attr attr;
};

/*
 * Write value to the pmaaddr{i} CSR
 */
static void write_pmaaddr_csr(const uint32_t index, ulong_t value)
{
	switch (index) {
	case 0:
		write_csr(NDS_PMAADDR0,  value); break;
	case 1:
		write_csr(NDS_PMAADDR1,  value); break;
	case 2:
		write_csr(NDS_PMAADDR2,  value); break;
	case 3:
		write_csr(NDS_PMAADDR3,  value); break;
	case 4:
		write_csr(NDS_PMAADDR4,  value); break;
	case 5:
		write_csr(NDS_PMAADDR5,  value); break;
	case 6:
		write_csr(NDS_PMAADDR6,  value); break;
	case 7:
		write_csr(NDS_PMAADDR7,  value); break;
	case 8:
		write_csr(NDS_PMAADDR8,  value); break;
	case 9:
		write_csr(NDS_PMAADDR9,  value); break;
	case 10:
		write_csr(NDS_PMAADDR10, value); break;
	case 11:
		write_csr(NDS_PMAADDR11, value); break;
	case 12:
		write_csr(NDS_PMAADDR12, value); break;
	case 13:
		write_csr(NDS_PMAADDR13, value); break;
	case 14:
		write_csr(NDS_PMAADDR14, value); break;
	case 15:
		write_csr(NDS_PMAADDR15, value); break;
	}
}

/*
 * Write value to the pma{i}cfg entry, which is packed into CSR pmacfg{j}
 */
static void write_pmacfg_entry(const uint32_t entry_index,
	uint8_t entry_value)
{
	/* 1-byte pma{i}cfg entries are packed into XLEN-bit CSRs pmacfg{j} */
	uint32_t index = PMACFG_NUM(entry_index);
	uint8_t shift = PMACFG_SHIFT(entry_index);
	ulong_t pmacfg = 0;

	switch (index) {
	case 0:
		read_csr(pmacfg, NDS_PMACFG0); break;
	case 1:
		read_csr(pmacfg, NDS_PMACFG1); break;
	case 2:
		read_csr(pmacfg, NDS_PMACFG2); break;
	case 3:
		read_csr(pmacfg, NDS_PMACFG3); break;
	}

	/* clear the old value in the pmacfg entry (use an XLEN-wide mask so
	 * the shift is well defined for entries beyond bit 31 on riscv64)
	 */
	pmacfg &= ~((ulong_t)0xFF << shift);
	/* set the new pmacfg entry value */
	pmacfg |= (ulong_t)entry_value << shift;

	switch (index) {
	case 0:
		write_csr(NDS_PMACFG0, pmacfg); break;
	case 1:
		write_csr(NDS_PMACFG1, pmacfg); break;
	case 2:
		write_csr(NDS_PMACFG2, pmacfg); break;
	case 3:
		write_csr(NDS_PMACFG3, pmacfg); break;
	}
}

/*
 * This internal function performs PMA region initialization.
 *
 * Note:
 *   The caller must provide a valid region index.
 */
static void region_init(const uint32_t index,
	const struct pma_region *region_conf)
{
	ulong_t pmaaddr;
	uint8_t pmacfg;

	if (region_conf->size == 4) {
		pmaaddr = NA4_ENCODING(region_conf->start);
		pmacfg = region_conf->attr.pmacfg | PMACFG_ETYPE_NA4;
	} else {
		pmaaddr = NAPOT_ENCODING(region_conf->start, region_conf->size);
		pmacfg = region_conf->attr.pmacfg | PMACFG_ETYPE_NAPOT;
	}

	write_pmaaddr_csr(index, pmaaddr);
	write_pmacfg_entry(index, pmacfg);
}

/*
 * This internal function performs a run-time sanity check of a
 * PMA region's start address and size.
 */
static int pma_region_is_valid(const struct pma_region *region)
{
	/* The region size must be a power of two and greater than or
	 * equal to the minimum PMA region size, and the start address
	 * of the region must be aligned to the size.
	 */
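	/* For example (illustrative values, assuming the configured minimum
	 * size is no larger than 64 KiB): start 0x80000000 with size 0x10000
	 * passes all three checks, while start 0x80001000 with size 0x2000
	 * is rejected because the start address is not size-aligned.
	 */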
	int region_is_valid =
		((region->size & (region->size - 1)) == 0U)
		&&
		(region->size >= CONFIG_SOC_ANDES_V5_PMA_REGION_MIN_ALIGN_AND_SIZE)
		&&
		((region->start & (region->size - 1)) == 0U);

	return region_is_valid;
}

#ifdef CONFIG_NOCACHE_MEMORY
static void configure_nocache_region(void)
{
	const struct pma_region nocache_region = {
		.start = (ulong_t)&_nocache_ram_start,
		.size = (ulong_t)&_nocache_ram_size,
		.attr = {PMACFG_MTYPE_MEMORY_NOCACHE_BUFFERABLE},
	};

	if (!pma_region_is_valid(&nocache_region)) {
		__ASSERT(0, "Configuring PMA region of nocache region failed\n");
	}

	/* Initialize nocache region at PMA region 0 */
	region_init(0, &nocache_region);
}
#endif /* CONFIG_NOCACHE_MEMORY */

/*
 * @brief Init PMA CSRs of each CPU core
 *
 * In SMP, each CPU has its own PMA CSRs, and they only affect that CPU.
 * The CSRs of every CPU must be configured so that a memory attribute
 * (e.g. uncacheable) takes effect on all CPUs.
 */
void pma_init_per_core(void)
{
#ifdef CONFIG_NOCACHE_MEMORY
	configure_nocache_region();
#endif /* CONFIG_NOCACHE_MEMORY */
}

static int pma_init(const struct device *arg)
{
	ulong_t mmsc_cfg;

	__asm__ volatile ("csrr %0, %1" : "=r" (mmsc_cfg) : "i" (NDS_MMSC_CFG));

	if (!(mmsc_cfg & MMSC_CFG_PPMA)) {
		/* This CPU doesn't support PMA */

		__ASSERT(0, "CPU doesn't support PMA. "
			    "Please disable CONFIG_SOC_ANDES_V5_PMA\n");
		return -1;
	}

	pma_init_per_core();

	return 0;
}

SYS_INIT(pma_init, PRE_KERNEL_2,
	CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);