/*
 * Copyright (c) 2022 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier:    BSD-3-Clause
 *
 * DRTM DMA protection.
 *
 * Authors:
 *      Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
 *
 */

#include <stdint.h>
#include <string.h>

#include <common/debug.h>
#include <drivers/arm/smmu_v3.h>
#include "drtm_dma_prot.h"
#include "drtm_main.h"
#include "drtm_remediation.h"
#include <plat/common/platform.h>
#include <smccc_helpers.h>

/*
 *  ________________________  LAUNCH success        ________________________
 * |        Initial         | -------------------> |      Prot engaged      |
 * |````````````````````````|                      |````````````````````````|
 * |  request.type == NONE  |                      |  request.type != NONE  |
 * |                        | <------------------- |                        |
 * `________________________'        UNPROTECT_MEM `________________________'
 *
 * Transitions that are not shown correspond to ABI calls that do not change
 * state and result in an error being returned to the caller.
 */
static struct dma_prot active_prot = {
	.type = PROTECT_NONE,
};

/* Version-independent type. */
typedef struct drtm_dl_dma_prot_args_v1 struct_drtm_dl_dma_prot_args;

/*
 * Checks whether the platform supports complete DMA protection.
 * Returns false if the platform supports complete DMA protection, and true
 * if it does not (i.e. initialisation must fail).
 */
bool drtm_dma_prot_init(void)
{
	bool must_init_fail = false;
	const uintptr_t *smmus;
	size_t num_smmus = 0;
	unsigned int total_smmus;

	/* Warn if the platform includes trusted DMA-capable devices (non-host platforms). */
	if (plat_has_non_host_platforms()) {
		WARN("DRTM: the platform includes trusted DMA-capable devices"
				" (non-host platforms)\n");
	}

	/*
	 * DLME protection is uncertain on platforms with peripherals whose
	 * DMA is not managed by an SMMU. DRTM doesn't work on such platforms.
	 */
	if (plat_has_unmanaged_dma_peripherals()) {
		ERROR("DRTM: this platform does not provide DMA protection\n");
		must_init_fail = true;
	}

	/*
	 * Check that the platform reported all SMMUs.
	 * It is acceptable if the platform doesn't have any SMMUs when it
	 * doesn't have any DMA-capable devices.
	 */
	total_smmus = plat_get_total_smmus();
	plat_enumerate_smmus(&smmus, &num_smmus);
	if (num_smmus != total_smmus) {
		ERROR("DRTM: could not discover all SMMUs\n");
		must_init_fail = true;
	}

	return must_init_fail;
}

/*
 * Checks that the DMA protection arguments are valid and that the given
 * protected regions are covered by DMA protection.
 */
enum drtm_retc drtm_dma_prot_check_args(const struct_drtm_dl_dma_prot_args *a,
					int a_dma_prot_type,
					drtm_mem_region_t p)
{
	switch ((enum dma_prot_type)a_dma_prot_type) {
	case PROTECT_MEM_ALL:
		if (a->dma_prot_table_paddr || a->dma_prot_table_size) {
			ERROR("DRTM: invalid launch due to inconsistent"
			      " DMA protection arguments\n");
			return MEM_PROTECT_INVALID;
		}
		/*
		 * Full DMA protection ought to ensure that the DLME and NWd
		 * DCE regions are protected, no further checks required.
		 */
		return SUCCESS;

	default:
		ERROR("DRTM: invalid launch due to unsupported DMA protection type\n");
		return MEM_PROTECT_INVALID;
	}
}

enum drtm_retc drtm_dma_prot_engage(const struct_drtm_dl_dma_prot_args *a,
				    int a_dma_prot_type)
{
	const uintptr_t *smmus;
	size_t num_smmus = 0;

	if (active_prot.type != PROTECT_NONE) {
		ERROR("DRTM: launch denied as previous DMA protection"
		      " is still engaged\n");
		return DENIED;
	}

	if (a_dma_prot_type == PROTECT_NONE) {
		return SUCCESS;
		/* Only PROTECT_MEM_ALL is supported currently. */
	} else if (a_dma_prot_type != PROTECT_MEM_ALL) {
		ERROR("%s(): unimplemented DMA protection type\n", __func__);
		panic();
	}

	/*
	 * Engage SMMUs in accordance with the request we have previously received.
	 * Only PROTECT_MEM_ALL is implemented currently.
	 */
	plat_enumerate_smmus(&smmus, &num_smmus);
	for (const uintptr_t *smmu = smmus; smmu < smmus+num_smmus; smmu++) {
		/*
		 * TODO: Invalidate SMMU's Stage-1 and Stage-2 TLB entries.  This ensures
		 * that any outstanding device transactions are completed, see Section
		 * 3.21.1, specification IHI_0070_C_a for an approximate reference.
		 */
		int rc = smmuv3_ns_set_abort_all(*smmu);
		if (rc != 0) {
			ERROR("DRTM: SMMU at PA 0x%lx failed to engage DMA protection"
			      " rc=%d\n", *smmu, rc);
			return INTERNAL_ERROR;
		}
	}

	/*
	 * TODO: Restrict DMA from the GIC.
	 *
	 * Full DMA protection may be achieved as follows:
	 *
	 * With a GICv3:
	 * - Set GICR_CTLR.EnableLPIs to 0, for each GICR;
	 *   GICR_CTLR.RWP == 0 must be the case before finishing, for each GICR.
	 * - Set GITS_CTLR.Enabled to 0;
	 *   GITS_CTLR.Quiescent == 1 must be the case before finishing.
	 *
	 * In addition, with a GICv4:
	 * - Set GICR_VPENDBASER.Valid to 0, for each GICR;
	 *   GICR_CTLR.RWP == 0 must be the case before finishing, for each GICR.
	 *
	 * Alternatively, e.g. if some bit values cannot be changed at runtime,
	 * this procedure should return an error if the LPI Pending and
	 * Configuration tables overlap the regions being protected.
	 */
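	/*
	 * The block below is an illustrative, non-compiled sketch of the
	 * GICv3 steps listed above; it is not the actual implementation.
	 * Register offsets and bit positions follow the GICv3 architecture
	 * spec (IHI 0069); the gicr_bases[]/num_gicrs and gits_base
	 * variables are hypothetical and would have to be supplied by the
	 * platform.  If compiled in, it would also need <lib/mmio.h>.
	 */
#if 0
	for (size_t i = 0U; i < num_gicrs; i++) {
		uintptr_t gicr_ctlr = gicr_bases[i] + 0x0000U;	/* GICR_CTLR */

		/* Clear GICR_CTLR.EnableLPIs (bit 0). */
		mmio_write_32(gicr_ctlr,
			      mmio_read_32(gicr_ctlr) & ~(1U << 0));
		/* Wait for GICR_CTLR.RWP (bit 3) to clear. */
		while ((mmio_read_32(gicr_ctlr) & (1U << 3)) != 0U) {
		}
	}

	/* Clear GITS_CTLR.Enabled (bit 0); GITS_CTLR is at offset 0 of the ITS frame. */
	mmio_write_32(gits_base, mmio_read_32(gits_base) & ~(1U << 0));
	/* Wait for GITS_CTLR.Quiescent (bit 31) to be set. */
	while ((mmio_read_32(gits_base) & (1U << 31)) == 0U) {
	}
#endif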

	active_prot.type = a_dma_prot_type;

	return SUCCESS;
}

/*
 * Undo what has previously been done in drtm_dma_prot_engage(), or enter
 * remediation if it is not possible.
 */
enum drtm_retc drtm_dma_prot_disengage(void)
{
	const uintptr_t *smmus;
	size_t num_smmus = 0;
	const char *err_str = "cannot undo PROTECT_MEM_ALL SMMU config";

	if (active_prot.type == PROTECT_NONE) {
		return SUCCESS;
		/* Only PROTECT_MEM_ALL is supported currently. */
	} else if (active_prot.type != PROTECT_MEM_ALL) {
		ERROR("%s(): unimplemented DMA protection type\n", __func__);
		panic();
	}

	/*
	 * For PROTECT_MEM_ALL, undo the SMMU configuration for "abort all" mode
	 * done during engage().
	 */
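	/*
	 * A minimal, non-compiled sketch of what that undo could look like,
	 * assuming engage() left each non-secure SMMU aborting incoming
	 * transactions via SMMU_GBPA.ABORT.  The offset (0x44) and bit
	 * positions (UPDATE = 31, ABORT = 20) follow the SMMUv3 spec
	 * (IHI 0070); whether clearing GBPA.ABORT alone is sufficient
	 * depends on what smmuv3_ns_set_abort_all() actually configured,
	 * and <lib/mmio.h> would be needed if this were compiled in.
	 */
#if 0
	plat_enumerate_smmus(&smmus, &num_smmus);
	for (const uintptr_t *smmu = smmus; smmu < smmus + num_smmus; smmu++) {
		uintptr_t gbpa = *smmu + 0x44U;		/* SMMU_GBPA */

		/* Clear ABORT; set UPDATE to commit, then poll until UPDATE clears. */
		mmio_write_32(gbpa,
			      (mmio_read_32(gbpa) & ~(1U << 20)) | (1U << 31));
		while ((mmio_read_32(gbpa) & (1U << 31)) != 0U) {
		}
	}
#endif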
	/* Simply enter remediation for now. */
	(void)smmus;
	(void)num_smmus;
	drtm_enter_remediation(1ULL, err_str);

	/* TODO: Undo GIC DMA restrictions. */

	active_prot.type = PROTECT_NONE;

	return SUCCESS;
}

uint64_t drtm_unprotect_mem(void *ctx)
{
	enum drtm_retc ret;

	switch (active_prot.type) {
	case PROTECT_NONE:
		ERROR("DRTM: invalid UNPROTECT_MEM, no DMA protection has"
		      " previously been engaged\n");
		ret = DENIED;
		break;

	case PROTECT_MEM_ALL:
		/*
		 * UNPROTECT_MEM is a no-op for PROTECT_MEM_ALL:  DRTM must not touch
		 * the NS SMMU as it is expected that the DLME has configured it.
		 */
		active_prot.type = PROTECT_NONE;

		ret = SUCCESS;
		break;

	default:
		ret = drtm_dma_prot_disengage();
		break;
	}

	SMC_RET1(ctx, ret);
}

void drtm_dma_prot_serialise_table(uint8_t *dst, size_t *size_out)
{
	if (active_prot.type == PROTECT_NONE) {
		return;
	} else if (active_prot.type != PROTECT_MEM_ALL) {
		ERROR("%s(): unimplemented DMA protection type\n", __func__);
		panic();
	}

	struct __packed descr_table_1 {
		drtm_memory_region_descriptor_table_t header;
		drtm_mem_region_t regions[1];
	} prot_table = {
		.header = {
			.revision = 1,
			.num_regions = sizeof(((struct descr_table_1 *)NULL)->regions) /
				sizeof(((struct descr_table_1 *)NULL)->regions[0])
		},
		.regions = {
			{.region_address = 0, PAGES_AND_TYPE(UINT64_MAX, 0x3)},
		}
	};

	memcpy(dst, &prot_table, sizeof(prot_table));
	*size_out = sizeof(prot_table);
}