/*
 * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
 * Copyright (c) 2022 Cypress Semiconductor Corporation (an Infineon
 * company) or an affiliate of Cypress Semiconductor Corporation. All rights
 * reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */
10 #include <limits.h>
11 #include <stdint.h>
12 #include "config_impl.h"
13 #include "lists.h"
14 #include "memory_symbols.h"
15 #include "region_defs.h"
16 #include "spm.h"
17 #include "tfm_hal_interrupt.h"
18 #include "tfm_plat_defs.h"
19 #include "utilities.h"
20 #include "ffm/backend.h"
21 #include "load/partition_defs.h"
22 #include "load/spm_load_api.h"
23 #include "load/service_defs.h"
24 #include "psa/client.h"
25
/* Read cursor over the build-time partition load info list: [start, end). */
static uintptr_t ldinf_sa = PART_INFOLIST_START;
static uintptr_t ldinf_ea = PART_INFOLIST_END;
/* Bump-allocator cursors for partition runtime objects: [start, end). */
static uintptr_t part_pool_sa = PART_INFORAM_START;
static uintptr_t part_pool_ea = PART_INFORAM_END;
/* Bump-allocator cursors for service runtime objects: [start, end). */
static uintptr_t serv_pool_sa = SERV_INFORAM_START;
static uintptr_t serv_pool_ea = SERV_INFORAM_END;
32
33 /* Allocate runtime space for partition. Panic if pool runs out. */
tfm_allocate_partition_assuredly(void)34 static struct partition_t *tfm_allocate_partition_assuredly(void)
35 {
36 struct partition_t *p_part_allocated = (struct partition_t *)part_pool_sa;
37
38 part_pool_sa += sizeof(struct partition_t);
39 if (part_pool_sa > part_pool_ea) {
40 tfm_core_panic();
41 }
42
43 return p_part_allocated;
44 }
45
46 /* Allocate runtime space for services. Panic if pool runs out. */
tfm_allocate_service_assuredly(uint32_t service_count)47 static struct service_t *tfm_allocate_service_assuredly(uint32_t service_count)
48 {
49 struct service_t *p_serv_allocated = (struct service_t *)serv_pool_sa;
50
51 if (service_count == 0) {
52 return NULL;
53 }
54
55 serv_pool_sa += service_count * sizeof(struct service_t);
56 if (serv_pool_sa > serv_pool_ea) {
57 tfm_core_panic();
58 }
59
60 return p_serv_allocated;
61 }
62
/*
 * Load the next partition descriptor from the build-time load info list,
 * allocate its runtime object, and insert it after 'head'.
 *
 * Returns the newly loaded partition, or NO_MORE_PARTITION when the load
 * info list is exhausted. Panics on a NULL head, malformed/truncated load
 * info, unsupported framework version, invalid PID, or an overlapping
 * NS-agent client ID range.
 */
struct partition_t *load_a_partition_assuredly(struct partition_head_t *head)
{
    struct partition_load_info_t *p_ptldinf;
    struct partition_t *partition;
    int32_t client_id_base;
    int32_t client_id_limit;

    if (!head) {
        tfm_core_panic();
    }

    /*
     * End-of-list check: there must be room for at least a fixed-size
     * header before the end of the info region. The UINTPTR_MAX guard
     * keeps the addition from wrapping on targets where the region sits
     * near the top of the address space.
     */
    if ((UINTPTR_MAX - ldinf_sa < sizeof(struct partition_load_info_t)) ||
        (ldinf_sa + sizeof(struct partition_load_info_t) >= ldinf_ea)) {
        return NO_MORE_PARTITION;
    }

    p_ptldinf = (struct partition_load_info_t *)ldinf_sa;

    /*
     * The full record (header + variable-length payload) must also fit;
     * a truncated record means corrupted load data, so panic rather than
     * return "no more".
     */
    if ((UINTPTR_MAX - ldinf_sa < LOAD_INFSZ_BYTES(p_ptldinf)) ||
        (ldinf_sa + LOAD_INFSZ_BYTES(p_ptldinf) > ldinf_ea)) {
        tfm_core_panic();
    }

    /* Magic ensures data integrity */
    if ((p_ptldinf->psa_ff_ver & PARTITION_INFO_MAGIC_MASK)
        != PARTITION_INFO_MAGIC) {
        tfm_core_panic();
    }

    /* Reject records built for a newer framework than this SPM supports. */
    if ((p_ptldinf->psa_ff_ver & PARTITION_INFO_VERSION_MASK)
        > PSA_FRAMEWORK_VERSION) {
        tfm_core_panic();
    }

    if (p_ptldinf->pid < 0) {
        /* 0 is the internal NS Agent, besides the normal positive PIDs */
        tfm_core_panic();
    }

    /* A valid client ID range must satisfy base <= limit. */
    if (p_ptldinf->client_id_base > p_ptldinf->client_id_limit) {
        tfm_core_panic();
    }

    /* Client ID range overlap check between NS agent partitions. */
    if (IS_NS_AGENT(p_ptldinf)) {
        UNI_LIST_FOREACH(partition, head, next) {
            if (!IS_NS_AGENT(partition->p_ldinf)) {
                continue;
            }
            client_id_base = partition->p_ldinf->client_id_base;
            client_id_limit = partition->p_ldinf->client_id_limit;
            /* Standard interval-intersection test against each NS agent. */
            if ((p_ptldinf->client_id_limit >= client_id_base) &&
                (p_ptldinf->client_id_base <= client_id_limit)) {
                tfm_core_panic();
            }
        }
    }

    partition = tfm_allocate_partition_assuredly();
    partition->p_ldinf = p_ptldinf;

    /* Advance the read cursor past this record only after validation. */
    ldinf_sa += LOAD_INFSZ_BYTES(p_ptldinf);

    UNI_LIST_INSERT_AFTER(head, partition, next);

    return partition;
}
130
/*
 * Allocate and initialize the runtime service objects for 'p_partition',
 * link each onto 'services_listhead', and register stateless services in
 * the caller-supplied reference table.
 *
 * 'stateless_services_ref_tbl'/'ref_tbl_size' describe the stateless
 * handle table; they are only validated (and required) when the partition
 * actually declares a stateless service.
 *
 * Returns the accumulated backend service setting (bits folded in by
 * BACKEND_SERVICE_SET for each service). Panics on NULL arguments, a bad
 * reference table, or a duplicate/out-of-range stateless handle index.
 */
uint32_t load_services_assuredly(struct partition_t *p_partition,
                                 struct service_head_t *services_listhead,
                                 struct service_t **stateless_services_ref_tbl,
                                 size_t ref_tbl_size)
{
    uint32_t i, serv_ldflags, hidx, service_setting = 0;
    struct service_t *services;
    const struct partition_load_info_t *p_ptldinf;
    const struct service_load_info_t *p_servldinf;

    if (!p_partition || !services_listhead) {
        tfm_core_panic();
    }

    p_ptldinf = p_partition->p_ldinf;
    p_servldinf = LOAD_INFO_SERVICE(p_ptldinf);

    /*
     * 'services' CAN be NULL when no services, which is a rational result.
     * The loop won't go in the NULL case.
     */
    services = tfm_allocate_service_assuredly(p_ptldinf->nservices);
    for (i = 0; i < p_ptldinf->nservices && services; i++) {
        services[i].p_ldinf = &p_servldinf[i];
        services[i].partition = p_partition;
        services[i].next = NULL;

        /* Fold this service's backend configuration into the return value. */
        BACKEND_SERVICE_SET(service_setting, &p_servldinf[i]);

        /* Populate the stateless service reference table */
        serv_ldflags = p_servldinf[i].flags;
        if (SERVICE_IS_STATELESS(serv_ldflags)) {
            /* The table must exist and have exactly the expected size. */
            if ((stateless_services_ref_tbl == NULL) ||
                (ref_tbl_size == 0) ||
                (ref_tbl_size !=
                 STATIC_HANDLE_NUM_LIMIT * sizeof(struct service_t *))) {
                tfm_core_panic();
            }

            hidx = SERVICE_GET_STATELESS_HINDEX(serv_ldflags);

            /* Index must be in range and not already claimed. */
            if ((hidx >= STATIC_HANDLE_NUM_LIMIT) ||
                stateless_services_ref_tbl[hidx]) {
                tfm_core_panic();
            }
            stateless_services_ref_tbl[hidx] = &services[i];
        }

        UNI_LIST_INSERT_AFTER(services_listhead, &services[i], next);
    }

    return service_setting;
}
184
/*
 * Initialize every IRQ declared by 'p_partition': accumulate its signal
 * bits into the partition's allowed-signal mask, validate mailbox NS-agent
 * client ID ranges, run each IRQ's platform init hook, and set the initial
 * enabled/disabled state according to the declared framework version.
 *
 * Compiled out entirely when neither FLIH nor SLIH API support is enabled.
 * Panics on a NULL partition, an IRQ client ID range outside the
 * partition's range, or a failed init hook.
 */
void load_irqs_assuredly(struct partition_t *p_partition)
{
#if CONFIG_TFM_FLIH_API == 1 || CONFIG_TFM_SLIH_API == 1
    const struct irq_load_info_t *p_irq_info;
    const struct partition_load_info_t *p_ldinf;
    uint32_t i;

    if (!p_partition) {
        tfm_core_panic();
    }

    p_ldinf = p_partition->p_ldinf;
    p_irq_info = LOAD_INFO_IRQ(p_ldinf);

    for (i = 0; i < p_ldinf->nirqs; i++) {
        p_partition->signals_allowed |= p_irq_info->signal;

        /* The client ID range of the irq should not beyond that of the partition. */
        if (IS_NS_AGENT_MAILBOX(p_ldinf)) {
            if ((p_irq_info->client_id_base > p_irq_info->client_id_limit) ||
                (p_irq_info->client_id_base < p_ldinf->client_id_base) ||
                (p_irq_info->client_id_limit > p_ldinf->client_id_limit)) {
                tfm_core_panic();
            }
        }

        /* Platform-specific IRQ setup; any failure is fatal at boot. */
        if (p_irq_info->init(p_partition, p_irq_info) != TFM_HAL_SUCCESS) {
            tfm_core_panic();
        }

        /*
         * Initial IRQ state depends on the declared framework version:
         * 1.0 partitions get their sources enabled here, 1.1 partitions
         * start disabled. NOTE(review): presumably FF-M 1.1 partitions
         * enable their own IRQs via psa_irq_enable() — confirm against
         * the FF-M 1.1 specification.
         */
        if ((p_ldinf->psa_ff_ver & PARTITION_INFO_VERSION_MASK) == 0x0100) {
            tfm_hal_irq_enable(p_irq_info->source);
        } else if ((p_ldinf->psa_ff_ver & PARTITION_INFO_VERSION_MASK)
                                                                  == 0x0101) {
            tfm_hal_irq_disable(p_irq_info->source);
        }
        p_irq_info++;
    }
#endif
}
225