/*
 * Copyright (c) 2014-2020, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <plat_startup.h>

/*
 * HandoffParams
 * Parameter		bitfield	encoding
 * -----------------------------------------------------------------------------
 * Exec State		0		0 -> AArch64, 1 -> AArch32
 * Endianness		1		0 -> LE, 1 -> BE
 * Secure (TZ)		2		0 -> Non-secure, 1 -> Secure
 * EL			3:4		00 -> EL0, 01 -> EL1, 10 -> EL2, 11 -> EL3
 * CPU#			5:6		00 -> A53_0, 01 -> A53_1, 10 -> A53_2, 11 -> A53_3
 * Reserved		7:10		Reserved
 * Cluster#		11:12		00 -> Cluster 0, 01 -> Cluster 1, 10 -> Cluster 2,
 *					11 -> Cluster 3 (applicable to Versal NET only).
 * Reserved		13:16		Reserved
 */

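/*
 * Worked example (illustrative only): flags = 0x8 decodes as
 *	bit  0     = 0  -> AArch64
 *	bit  1     = 0  -> little-endian
 *	bit  2     = 0  -> non-secure
 *	bits 3:4   = 01 -> EL1
 *	bits 5:6   = 00 -> A53_0 (CPU 0)
 *	bits 11:12 = 00 -> Cluster 0
 * i.e. a non-secure AArch64 image entered at EL1 on CPU 0 of cluster 0.
 */
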
#define XBL_FLAGS_ESTATE_SHIFT		0U
#define XBL_FLAGS_ESTATE_MASK		(1U << XBL_FLAGS_ESTATE_SHIFT)
#define XBL_FLAGS_ESTATE_A64		0U
#define XBL_FLAGS_ESTATE_A32		1U

#define XBL_FLAGS_ENDIAN_SHIFT		1U
#define XBL_FLAGS_ENDIAN_MASK		(1U << XBL_FLAGS_ENDIAN_SHIFT)
#define XBL_FLAGS_ENDIAN_LE		0U
#define XBL_FLAGS_ENDIAN_BE		1U

#define XBL_FLAGS_TZ_SHIFT		2U
#define XBL_FLAGS_TZ_MASK		(1U << XBL_FLAGS_TZ_SHIFT)
#define XBL_FLAGS_NON_SECURE		0U
#define XBL_FLAGS_SECURE		1U

#define XBL_FLAGS_EL_SHIFT		3U
#define XBL_FLAGS_EL_MASK		(3U << XBL_FLAGS_EL_SHIFT)
#define XBL_FLAGS_EL0			0U
#define XBL_FLAGS_EL1			1U
#define XBL_FLAGS_EL2			2U
#define XBL_FLAGS_EL3			3U

#define XBL_FLAGS_CPU_SHIFT		5U
#define XBL_FLAGS_CPU_MASK		(3U << XBL_FLAGS_CPU_SHIFT)
#define XBL_FLAGS_A53_0			0U
#define XBL_FLAGS_A53_1			1U
#define XBL_FLAGS_A53_2			2U
#define XBL_FLAGS_A53_3			3U

#if defined(PLAT_versal_net)
/* Cluster# occupies bits 11:12, so the mask is GENMASK(high, low). */
#define XBL_FLAGS_CLUSTER_SHIFT		11U
#define XBL_FLAGS_CLUSTER_MASK		GENMASK(12, 11)

#define XBL_FLAGS_CLUSTER_0		0U
#endif /* PLAT_versal_net */

/**
 * get_xbl_cpu() - Get the target CPU for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_A53_0, XBL_FLAGS_A53_1, XBL_FLAGS_A53_2 or XBL_FLAGS_A53_3.
 *
 */
static int32_t get_xbl_cpu(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_CPU_MASK;

	return flags >> XBL_FLAGS_CPU_SHIFT;
}

/**
 * get_xbl_el() - Get the target exception level for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_EL0, XBL_FLAGS_EL1, XBL_FLAGS_EL2 or XBL_FLAGS_EL3.
 *
 */
static int32_t get_xbl_el(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_EL_MASK;

	return flags >> XBL_FLAGS_EL_SHIFT;
}

/**
 * get_xbl_ss() - Get the target security state for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_NON_SECURE or XBL_FLAGS_SECURE.
 *
 */
static int32_t get_xbl_ss(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_TZ_MASK;

	return flags >> XBL_FLAGS_TZ_SHIFT;
}

/**
 * get_xbl_endian() - Get the target endianness for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: SPSR_E_LITTLE or SPSR_E_BIG.
 *
 */
static int32_t get_xbl_endian(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_ENDIAN_MASK;

	flags >>= XBL_FLAGS_ENDIAN_SHIFT;

	if (flags == XBL_FLAGS_ENDIAN_BE) {
		return SPSR_E_BIG;
	} else {
		return SPSR_E_LITTLE;
	}
}

/**
 * get_xbl_estate() - Get the target execution state for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_ESTATE_A32 or XBL_FLAGS_ESTATE_A64.
 *
 */
static int32_t get_xbl_estate(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_ESTATE_MASK;

	return flags >> XBL_FLAGS_ESTATE_SHIFT;
}

#if defined(PLAT_versal_net)
/**
 * get_xbl_cluster() - Get the cluster number.
 * @partition: Pointer to the partition structure.
 *
 * Return: Cluster number for the partition.
 */
static int32_t get_xbl_cluster(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_CLUSTER_MASK;

	return (int32_t)(flags >> XBL_FLAGS_CLUSTER_SHIFT);
}
#endif /* PLAT_versal_net */

/**
 * xbl_handover() - Populates the bl32 and bl33 image info structures.
 * @bl32: BL32 image info structure.
 * @bl33: BL33 image info structure.
 * @handoff_addr: TF-A handoff address.
 *
 * Process the handoff parameters from the XBL and populate the BL32 and BL33
 * image info structures accordingly.
 *
 * Return: The status of the handoff; the value is from the xbl_handoff enum.
 *
 */
enum xbl_handoff xbl_handover(entry_point_info_t *bl32,
					entry_point_info_t *bl33,
					uint64_t handoff_addr)
{
	const struct xbl_handoff_params *HandoffParams;

	if (!handoff_addr) {
		WARN("BL31: No handoff structure passed\n");
		return XBL_HANDOFF_NO_STRUCT;
	}

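	/*
	 * The handoff structure written by the XBL starts with the ASCII
	 * magic "XLNX"; anything else is rejected as invalid.
	 */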
	HandoffParams = (struct xbl_handoff_params *)handoff_addr;
	if ((HandoffParams->magic[0] != 'X') ||
	    (HandoffParams->magic[1] != 'L') ||
	    (HandoffParams->magic[2] != 'N') ||
	    (HandoffParams->magic[3] != 'X')) {
		ERROR("BL31: invalid handoff structure at %" PRIx64 "\n", handoff_addr);
		return XBL_HANDOFF_INVAL_STRUCT;
	}

	VERBOSE("BL31: TF-A handoff params at:0x%" PRIx64 ", entries:%u\n",
		handoff_addr, HandoffParams->num_entries);
	if (HandoffParams->num_entries > XBL_MAX_PARTITIONS) {
		ERROR("BL31: TF-A handoff params: too many partitions (%u/%u)\n",
		      HandoffParams->num_entries, XBL_MAX_PARTITIONS);
		return XBL_HANDOFF_TOO_MANY_PARTS;
	}

	/*
	 * We loop over all passed entries but only populate two image structs
	 * (bl32, bl33), i.e. the last applicable images in the handoff
	 * structure will be used for the handoff.
	 */
	for (size_t i = 0; i < HandoffParams->num_entries; i++) {
		entry_point_info_t *image;
		int32_t target_estate, target_secure, target_cpu;
		uint32_t target_endianness, target_el;

		VERBOSE("BL31: %zu: entry:0x%" PRIx64 ", flags:0x%" PRIx64 "\n", i,
			HandoffParams->partition[i].entry_point,
			HandoffParams->partition[i].flags);

#if defined(PLAT_versal_net)
		uint32_t target_cluster;

		target_cluster = get_xbl_cluster(&HandoffParams->partition[i]);
		if (target_cluster != XBL_FLAGS_CLUSTER_0) {
			WARN("BL31: invalid target Cluster (%i)\n",
			     target_cluster);
			continue;
		}
#endif /* PLAT_versal_net */

		target_cpu = get_xbl_cpu(&HandoffParams->partition[i]);
		if (target_cpu != XBL_FLAGS_A53_0) {
			WARN("BL31: invalid target CPU (%i)\n", target_cpu);
			continue;
		}

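		/*
		 * BL31 can only hand off to EL1 or EL2; EL0 and EL3 requests
		 * are skipped.
		 */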
		target_el = get_xbl_el(&HandoffParams->partition[i]);
		if ((target_el == XBL_FLAGS_EL3) ||
		    (target_el == XBL_FLAGS_EL0)) {
			WARN("BL31: invalid target exception level (%i)\n",
			     target_el);
			continue;
		}

		target_secure = get_xbl_ss(&HandoffParams->partition[i]);
		if (target_secure == XBL_FLAGS_SECURE &&
		    target_el == XBL_FLAGS_EL2) {
			WARN("BL31: invalid security state (%i) for exception level (%i)\n",
			     target_secure, target_el);
			continue;
		}

		target_estate = get_xbl_estate(&HandoffParams->partition[i]);
		target_endianness = get_xbl_endian(&HandoffParams->partition[i]);

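		/*
		 * Secure images are described via the BL32 entry point info,
		 * non-secure images via BL33; the SPSR is built to match the
		 * requested execution state and exception level.
		 */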
		if (target_secure == XBL_FLAGS_SECURE) {
			image = bl32;

			if (target_estate == XBL_FLAGS_ESTATE_A32) {
				bl32->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
							 target_endianness,
							 DISABLE_ALL_EXCEPTIONS);
			} else {
				bl32->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
						     DISABLE_ALL_EXCEPTIONS);
			}
		} else {
			image = bl33;

			if (target_estate == XBL_FLAGS_ESTATE_A32) {
				if (target_el == XBL_FLAGS_EL2) {
					target_el = MODE32_hyp;
				} else {
					target_el = MODE32_sys;
				}

				bl33->spsr = SPSR_MODE32(target_el, SPSR_T_ARM,
							 target_endianness,
							 DISABLE_ALL_EXCEPTIONS);
			} else {
				if (target_el == XBL_FLAGS_EL2) {
					target_el = MODE_EL2;
				} else {
					target_el = MODE_EL1;
				}

				bl33->spsr = SPSR_64(target_el, MODE_SP_ELX,
						     DISABLE_ALL_EXCEPTIONS);
			}
		}

		VERBOSE("Setting up %s entry point to:%" PRIx64 ", el:%x\n",
			target_secure == XBL_FLAGS_SECURE ? "BL32" : "BL33",
			HandoffParams->partition[i].entry_point,
			target_el);
		image->pc = HandoffParams->partition[i].entry_point;

		if (target_endianness == SPSR_E_BIG) {
			EP_SET_EE(image->h.attr, EP_EE_BIG);
		} else {
			EP_SET_EE(image->h.attr, EP_EE_LITTLE);
		}
	}

	return XBL_HANDOFF_SUCCESS;
}
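
/*
 * Usage sketch (illustrative only; the variable names below are assumptions,
 * not definitions from this file). A BL31 platform setup routine would
 * typically call xbl_handover() with its BL32/BL33 entry point structures and
 * the handoff address discovered by the platform:
 *
 *	enum xbl_handoff ret;
 *
 *	ret = xbl_handover(&bl32_image_ep_info, &bl33_image_ep_info,
 *			   handoff_params_addr);
 *	if (ret == XBL_HANDOFF_NO_STRUCT || ret == XBL_HANDOFF_INVAL_STRUCT) {
 *		// fall back to built-in default entry points
 *	} else if (ret != XBL_HANDOFF_SUCCESS) {
 *		panic();
 *	}
 */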