/*
 * Copyright (c) 2022, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/debug.h>
#include "ffa_helpers.h"
#include <services/ffa_svc.h>
#include "tsp_private.h"

/*******************************************************************************
 * Wrapper function to send a direct request.
 ******************************************************************************/
smc_args_t ffa_msg_send_direct_req(ffa_endpoint_id16_t sender,
				   ffa_endpoint_id16_t receiver,
				   uint32_t arg3,
				   uint32_t arg4,
				   uint32_t arg5,
				   uint32_t arg6,
				   uint32_t arg7)
{
	uint32_t src_dst_ids = (sender << FFA_DIRECT_MSG_SOURCE_SHIFT) |
			       (receiver << FFA_DIRECT_MSG_DESTINATION_SHIFT);

	/* Send Direct Request. */
	return smc_helper(FFA_MSG_SEND_DIRECT_REQ_SMC64, src_dst_ids,
			  0, arg3, arg4, arg5, arg6, arg7);
}

/*******************************************************************************
 * Wrapper function to send a direct response.
 ******************************************************************************/
smc_args_t *ffa_msg_send_direct_resp(ffa_endpoint_id16_t sender,
				     ffa_endpoint_id16_t receiver,
				     uint32_t arg3,
				     uint32_t arg4,
				     uint32_t arg5,
				     uint32_t arg6,
				     uint32_t arg7)
{
	uint32_t src_dst_ids = (sender << FFA_DIRECT_MSG_SOURCE_SHIFT) |
			       (receiver << FFA_DIRECT_MSG_DESTINATION_SHIFT);

	return set_smc_args(FFA_MSG_SEND_DIRECT_RESP_SMC64, src_dst_ids,
			    0, arg3, arg4, arg5, arg6, arg7);
}
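
/*
 * Illustrative sketch, not part of the original file: how a partition built
 * on the two wrappers above might exchange a direct message. The endpoint
 * IDs (0x8001 and 0x8002) and the register payload are hypothetical
 * placeholders; the function is for illustration only and is not referenced
 * elsewhere.
 */
static bool tsp_direct_msg_example(void)
{
	smc_args_t ret;

	/* Send a direct request carrying an arbitrary payload in x3-x7. */
	ret = ffa_msg_send_direct_req(0x8001, 0x8002,
				      0xaa, 0xbb, 0xcc, 0xdd, 0xee);

	/* The receiver is expected to answer with a direct response. */
	if (ffa_func_id(ret) != FFA_MSG_SEND_DIRECT_RESP_SMC64) {
		ERROR("Unexpected direct message response: 0x%x\n",
		      ffa_func_id(ret));
		return false;
	}

	return true;
}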

/*******************************************************************************
 * Memory Management Helpers.
 ******************************************************************************/

/**
 * Initialises the header of the given `ffa_mtd`, not including the
 * composite memory region offset.
 */
static void ffa_memory_region_init_header(
	struct ffa_mtd *memory_region, ffa_endpoint_id16_t sender,
	ffa_mem_attr16_t attributes, ffa_mtd_flag32_t flags,
	uint64_t handle, uint64_t tag, ffa_endpoint_id16_t *receivers,
	uint32_t receiver_count, ffa_mem_perm8_t permissions)
{
	struct ffa_emad_v1_0 *emad;

	memory_region->emad_offset = sizeof(struct ffa_mtd);
	memory_region->emad_size = sizeof(struct ffa_emad_v1_0);
	emad = (struct ffa_emad_v1_0 *)
		    ((uint8_t *) memory_region +
		     memory_region->emad_offset);
	memory_region->sender_id = sender;
	memory_region->memory_region_attributes = attributes;
	memory_region->reserved_36_39 = 0;
	memory_region->flags = flags;
	memory_region->handle = handle;
	memory_region->tag = tag;
	memory_region->reserved_40_47 = 0;
	memory_region->emad_count = receiver_count;
	for (uint32_t i = 0U; i < receiver_count; i++) {
		emad[i].mapd.endpoint_id = receivers[i];
		emad[i].mapd.memory_access_permissions = permissions;
		emad[i].mapd.flags = 0;
		emad[i].comp_mrd_offset = 0;
		emad[i].reserved_8_15 = 0;
	}
}

/**
 * Initialises the given `ffa_mtd` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
 * TODO: Support differing attributes per receiver.
 *
 * Returns the size of the descriptor written.
 */
static uint32_t ffa_memory_retrieve_request_init(
	struct ffa_mtd *memory_region, uint64_t handle,
	ffa_endpoint_id16_t sender, ffa_endpoint_id16_t *receivers,
	uint32_t receiver_count, uint64_t tag, ffa_mtd_flag32_t flags,
	ffa_mem_perm8_t permissions,
	ffa_mem_attr16_t attributes)
{
	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      handle, tag, receivers,
				      receiver_count, permissions);

	return sizeof(struct ffa_mtd) +
	       memory_region->emad_count * sizeof(struct ffa_emad_v1_0);
}
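
/*
 * Illustrative sketch, not part of the original file: building a retrieve
 * request for a hypothetical handle with this partition (placeholder ID
 * 0x8001) as the only receiver and a placeholder sender ID of 0. The value
 * returned is the number of TX buffer bytes to pass to FFA_MEM_RETRIEVE_REQ,
 * i.e. the ffa_mtd header followed by one ffa_emad_v1_0 entry per receiver.
 */
static uint32_t tsp_build_retrieve_req_example(struct ffa_mtd *desc,
					       uint64_t handle,
					       ffa_mtd_flag32_t flags)
{
	ffa_endpoint_id16_t receiver = 0x8001;

	return ffa_memory_retrieve_request_init(desc, handle, 0, &receiver,
						1U, 0, flags,
						FFA_MEM_PERM_RW | FFA_MEM_PERM_NX,
						FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB |
						FFA_MEM_ATTR_INNER_SHAREABLE);
}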

/* Relinquish access to memory region. */
bool ffa_mem_relinquish(void)
{
	smc_args_t ret;

	ret = smc_helper(FFA_MEM_RELINQUISH, 0, 0, 0, 0, 0, 0, 0);
	if (ffa_func_id(ret) != FFA_SUCCESS_SMC32) {
		ERROR("%s failed to relinquish memory! error: (%x) %x\n",
		      __func__, ffa_func_id(ret), ffa_error_code(ret));
		return false;
	}
	return true;
}

/* Retrieve memory shared by another partition. */
smc_args_t ffa_mem_retrieve_req(uint32_t descriptor_length,
				uint32_t fragment_length)
{
	return smc_helper(FFA_MEM_RETRIEVE_REQ_SMC32,
			  descriptor_length,
			  fragment_length,
			  0, 0, 0, 0, 0);
}

/* Retrieve the next memory descriptor fragment. */
smc_args_t ffa_mem_frag_rx(uint64_t handle, uint32_t recv_length)
{
	return smc_helper(FFA_MEM_FRAG_RX,
			  FFA_MEM_HANDLE_LOW(handle),
			  FFA_MEM_HANDLE_HIGH(handle),
			  recv_length,
			  0, 0, 0, 0);
}
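
/*
 * Illustrative sketch, not part of the original file: chaining the two calls
 * above to pull in a retrieve response that does not fit in one fragment.
 * Copying each fragment out of the RX buffer and releasing the buffer are
 * omitted for brevity; the function is for illustration only.
 */
static bool tsp_retrieve_fragments_example(uint64_t handle,
					   uint32_t descriptor_size)
{
	smc_args_t ret;
	uint32_t total_length;
	uint32_t received;

	ret = ffa_mem_retrieve_req(descriptor_size, descriptor_size);
	if (ffa_func_id(ret) == FFA_ERROR) {
		return false;
	}

	/* FFA_MEM_RETRIEVE_RESP: total length in w1, fragment length in w2. */
	total_length = ret._regs[1];
	received = ret._regs[2];

	while (received < total_length) {
		/* Ask for the next fragment, starting at 'received' bytes. */
		ret = ffa_mem_frag_rx(handle, received);
		if (ffa_func_id(ret) == FFA_ERROR) {
			return false;
		}
		/* FFA_MEM_FRAG_TX reports this fragment's length in w3. */
		received += ret._regs[3];
	}

	return true;
}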

bool memory_retrieve(struct mailbox *mb,
		     struct ffa_mtd **retrieved,
		     uint64_t handle, ffa_endpoint_id16_t sender,
		     ffa_endpoint_id16_t *receivers, uint32_t receiver_count,
		     ffa_mtd_flag32_t flags, uint32_t *frag_length,
		     uint32_t *total_length)
{
	smc_args_t ret;
	uint32_t descriptor_size;
	struct ffa_mtd *memory_region;

	if (retrieved == NULL || mb == NULL) {
		ERROR("Invalid parameters!\n");
		return false;
	}

	memory_region = (struct ffa_mtd *)mb->tx_buffer;

	/* Clear TX buffer. */
	memset(memory_region, 0, PAGE_SIZE);

	/* Clear local buffer. */
	memset(mem_region_buffer, 0, REGION_BUF_SIZE);

	descriptor_size = ffa_memory_retrieve_request_init(
	    memory_region, handle, sender, receivers, receiver_count, 0, flags,
	    FFA_MEM_PERM_RW | FFA_MEM_PERM_NX,
	    FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB |
	    FFA_MEM_ATTR_INNER_SHAREABLE);

	ret = ffa_mem_retrieve_req(descriptor_size, descriptor_size);

	if (ffa_func_id(ret) == FFA_ERROR) {
		ERROR("Couldn't retrieve the memory page. Error: %x\n",
		      ffa_error_code(ret));
		return false;
	}

	/*
	 * The total_length and frag_length values returned below track the
	 * state of the transaction: the transfer is complete once the
	 * fragment lengths of all received fragments add up to total_length.
	 */
	*total_length = ret._regs[1];
	*frag_length = ret._regs[2];

	/*
	 * Validate that the lengths are non-zero and that frag_length does
	 * not exceed total_length or the mailbox size.
	 */
	if (*frag_length == 0U || *total_length == 0U ||
	    *frag_length > *total_length ||
	    *frag_length > (mb->rxtx_page_count * PAGE_SIZE)) {
		ERROR("Invalid parameters!\n");
		return false;
	}

	/* Copy response to local buffer. */
	memcpy(mem_region_buffer, mb->rx_buffer, *frag_length);

	if (ffa_rx_release()) {
		ERROR("Failed to release buffer!\n");
		return false;
	}

	*retrieved = (struct ffa_mtd *) mem_region_buffer;

	if ((*retrieved)->emad_count > MAX_MEM_SHARE_RECIPIENTS) {
		VERBOSE("SPMC memory sharing supports max of %u receivers!\n",
			MAX_MEM_SHARE_RECIPIENTS);
		return false;
	}

	/*
	 * The memory is being shared from the normal world, therefore
	 * validate that the NS bit was set by the SPMC.
	 */
	if (((*retrieved)->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) == 0U) {
		ERROR("SPMC has not set the NS bit! 0x%x\n",
		      (*retrieved)->memory_region_attributes);
		return false;
	}

	VERBOSE("Memory Descriptor Retrieved!\n");

	return true;
}
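
/*
 * Illustrative sketch, not part of the original file: a typical caller-side
 * flow for the helpers in this file, in which a partition retrieves a region
 * shared with it and later hands it back. The endpoint IDs and the reuse of
 * the TX buffer for the relinquish descriptor are hypothetical placeholders.
 */
static bool tsp_share_lifecycle_example(struct mailbox *mb, uint64_t handle,
					ffa_mtd_flag32_t flags)
{
	struct ffa_mtd *desc = NULL;
	uint32_t frag_length;
	uint32_t total_length;
	ffa_endpoint_id16_t this_sp = 0x8001;	/* Hypothetical own ID. */
	ffa_endpoint_id16_t sender = 0U;	/* Hypothetical lender ID. */

	if (!memory_retrieve(mb, &desc, handle, sender, &this_sp, 1U, flags,
			     &frag_length, &total_length)) {
		return false;
	}

	/* ... map and use the memory described by 'desc' here ... */

	/* Build the relinquish descriptor in the TX buffer and send it. */
	return memory_relinquish(
		(struct ffa_mem_relinquish_descriptor *)mb->tx_buffer,
		handle, this_sp);
}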

/* Relinquish the memory region. */
bool memory_relinquish(struct ffa_mem_relinquish_descriptor *m, uint64_t handle,
		       ffa_endpoint_id16_t id)
{
	ffa_mem_relinquish_init(m, handle, 0, id);
	return ffa_mem_relinquish();
}

/* Notify the SPMC that the partition's RX buffer can be released. */
bool ffa_rx_release(void)
{
	smc_args_t ret;

	ret = smc_helper(FFA_RX_RELEASE, 0, 0, 0, 0, 0, 0, 0);
	return ret._regs[SMC_ARG0] != FFA_SUCCESS_SMC32;
}

/* Map the provided buffers with the SPMC. */
bool ffa_rxtx_map(uintptr_t send, uintptr_t recv, uint32_t pages)
{
	smc_args_t ret;

	ret = smc_helper(FFA_RXTX_MAP_SMC64, send, recv, pages, 0, 0, 0, 0);
	return ret._regs[0] != FFA_SUCCESS_SMC32;
}
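
/*
 * Illustrative sketch, not part of the original file: registering a pair of
 * hypothetical, page-aligned buffers with the SPMC. This must succeed before
 * any of the RX/TX buffer based helpers above can be used.
 */
static bool tsp_rxtx_map_example(uintptr_t tx_page, uintptr_t rx_page)
{
	/* Map one page each for TX and RX; a true return means failure. */
	if (ffa_rxtx_map(tx_page, rx_page, 1U)) {
		ERROR("Failed to map RXTX buffers with the SPMC!\n");
		return false;
	}

	return true;
}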