// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2018 Etnaviv Project
 */

#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"

#define MMUv2_PTE_PRESENT		BIT(0)
#define MMUv2_PTE_EXCEPTION		BIT(1)
#define MMUv2_PTE_WRITEABLE		BIT(2)

#define MMUv2_MTLB_MASK			0xffc00000
#define MMUv2_MTLB_SHIFT		22
#define MMUv2_STLB_MASK			0x003ff000
#define MMUv2_STLB_SHIFT		12
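
/*
 * A GPU virtual address is decoded in two levels: bits 31:22 index the
 * master TLB, bits 21:12 index the slave TLB below it and bits 11:0
 * are the offset into the 4K page. E.g. iova 0x00c01000 hits MTLB
 * entry 3, STLB entry 1.
 */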

#define MMUv2_MAX_STLB_ENTRIES		1024

struct etnaviv_iommuv2_domain {
	struct etnaviv_iommu_domain base;
	/* P(age) T(able) A(rray) */
	u64 *pta_cpu;
	dma_addr_t pta_dma;
	/* M(aster) TLB aka first level pagetable */
	u32 *mtlb_cpu;
	dma_addr_t mtlb_dma;
	/* S(lave) TLB aka second level pagetable */
	u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
	dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};

static struct etnaviv_iommuv2_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommuv2_domain, base);
}

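/*
 * Second level page tables are allocated lazily: the first mapping in
 * a 4MB region allocates the backing STLB and marks it present in the
 * MTLB.
 */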
static int
etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_domain *etnaviv_domain,
			    int stlb)
{
	if (etnaviv_domain->stlb_cpu[stlb])
		return 0;

	etnaviv_domain->stlb_cpu[stlb] =
			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
				     &etnaviv_domain->stlb_dma[stlb],
				     GFP_KERNEL);

	if (!etnaviv_domain->stlb_cpu[stlb])
		return -ENOMEM;

	memset32(etnaviv_domain->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
		 SZ_4K / sizeof(u32));

	etnaviv_domain->mtlb_cpu[stlb] = etnaviv_domain->stlb_dma[stlb] |
						      MMUv2_PTE_PRESENT;
	return 0;
}

static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry, ret;
	u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;

	if (size != SZ_4K)
		return -EINVAL;

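	/* bits 39:32 of a wide physical address go into PTE bits 11:4 */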
	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		entry |= (upper_32_bits(paddr) & 0xff) << 4;

	if (prot & ETNAVIV_PROT_WRITE)
		entry |= MMUv2_PTE_WRITEABLE;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	ret = etnaviv_iommuv2_ensure_stlb(etnaviv_domain, mtlb_entry);
	if (ret)
		return ret;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;

	return 0;
}

static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry;

	if (size != SZ_4K)
		return -EINVAL;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

	return SZ_4K;
}

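/*
 * Allocate the static parts of the domain up front: the scratch page
 * used as the MMU safe address, the page table array and the master
 * TLB. Slave TLBs are allocated on demand at map time.
 */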
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
	int ret;

	/* allocate scratch page */
	etnaviv_domain->base.bad_page_cpu =
			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
				     &etnaviv_domain->base.bad_page_dma,
				     GFP_KERNEL);
	if (!etnaviv_domain->base.bad_page_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
		 SZ_4K / sizeof(u32));

	etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
					       SZ_4K, &etnaviv_domain->pta_dma,
					       GFP_KERNEL);
	if (!etnaviv_domain->pta_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
						SZ_4K, &etnaviv_domain->mtlb_dma,
						GFP_KERNEL);
	if (!etnaviv_domain->mtlb_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	memset32(etnaviv_domain->mtlb_cpu, MMUv2_PTE_EXCEPTION,
		 MMUv2_MAX_STLB_ENTRIES);

	return 0;

fail_mem:
	if (etnaviv_domain->base.bad_page_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->base.bad_page_cpu,
			    etnaviv_domain->base.bad_page_dma);

	if (etnaviv_domain->pta_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);

	if (etnaviv_domain->mtlb_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);

	return ret;
}

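/* Free the scratch page, PTA, MTLB and any STLBs that were allocated. */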
static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->base.bad_page_cpu,
		    etnaviv_domain->base.bad_page_dma);

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (etnaviv_domain->stlb_cpu[i])
			dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
				    etnaviv_domain->stlb_cpu[i],
				    etnaviv_domain->stlb_dma[i]);
	}

	vfree(etnaviv_domain);
}

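/* One page for the MTLB plus one page per present second level table. */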
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	size_t dump_size = SZ_4K;
	int i;

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			dump_size += SZ_4K;

	return dump_size;
}

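/*
 * Pack the MTLB and each present STLB back to back; the buffer offsets
 * must match the sizing done in etnaviv_iommuv2_dump_size().
 */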
static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
	buf += SZ_4K;
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
			/* only present STLBs advance the dump buffer, as
			 * dump_size() only accounts for those */
			buf += SZ_4K;
		}
}

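/*
 * Non-secure mode: point the GPU at the MTLB and scratch page through
 * an FE command stream, then enable the MMU via MMIO once the FE has
 * gone idle.
 */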
static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
		return;

	prefetch = etnaviv_buffer_config_mmuv2(gpu,
				(u32)etnaviv_domain->mtlb_dma,
				(u32)etnaviv_domain->base.bad_page_dma);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}

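/*
 * Kernel security mode: program the page table array through the SEC
 * register interface and have the FE trigger the actual PTA load.
 */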
static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
				to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
		return;

	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
		  lower_32_bits(etnaviv_domain->pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
		  upper_32_bits(etnaviv_domain->pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);

	gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
		  lower_32_bits(etnaviv_domain->base.bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
		  lower_32_bits(etnaviv_domain->base.bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(etnaviv_domain->base.bad_page_dma)));

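	/* entry 0 of the PTA points at our MTLB, using 4K page mode */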
	etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
				     VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;

	/* trigger a PTA load through the FE */
	prefetch = etnaviv_buffer_config_pta(gpu);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
}

void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
	switch (gpu->sec_mode) {
	case ETNA_SEC_NONE:
		etnaviv_iommuv2_restore_nonsec(gpu);
		break;
	case ETNA_SEC_KERNEL:
		etnaviv_iommuv2_restore_sec(gpu);
		break;
	default:
		WARN(1, "unhandled GPU security mode\n");
		break;
	}
}

static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
	.free = etnaviv_iommuv2_domain_free,
	.map = etnaviv_iommuv2_map,
	.unmap = etnaviv_iommuv2_unmap,
	.dump_size = etnaviv_iommuv2_dump_size,
	.dump = etnaviv_iommuv2_dump,
};

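/* Set up an MMUv2 domain covering the full 4GB GPU virtual address space. */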
struct etnaviv_iommu_domain *
etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain;
	struct etnaviv_iommu_domain *domain;
	int ret;

	etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
	if (!etnaviv_domain)
		return NULL;

	domain = &etnaviv_domain->base;

	domain->dev = gpu->dev;
	domain->base = 0;
	domain->size = (u64)SZ_1G * 4;
	domain->ops = &etnaviv_iommuv2_ops;

	ret = etnaviv_iommuv2_init(etnaviv_domain);
	if (ret)
		goto out_free;

	return &etnaviv_domain->base;

out_free:
	vfree(etnaviv_domain);
	return NULL;
}