// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2018 Etnaviv Project
 */

#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "state.xml.h"
#include "state_hi.xml.h"

#define MMUv2_PTE_PRESENT		BIT(0)
#define MMUv2_PTE_EXCEPTION		BIT(1)
#define MMUv2_PTE_WRITEABLE		BIT(2)

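/*
 * A 32-bit GPU virtual address is split into a 10-bit master TLB index,
 * a 10-bit slave TLB index and a 12-bit offset into the 4K page.
 */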
#define MMUv2_MTLB_MASK			0xffc00000
#define MMUv2_MTLB_SHIFT		22
#define MMUv2_STLB_MASK			0x003ff000
#define MMUv2_STLB_SHIFT		12

#define MMUv2_MAX_STLB_ENTRIES		1024

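/*
 * Per-context MMUv2 state: one master TLB (first level) page plus up to
 * 1024 lazily allocated slave TLB (second level) pages.
 */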
struct etnaviv_iommuv2_context {
	struct etnaviv_iommu_context base;
	unsigned short id;
	/* M(aster) TLB aka first level pagetable */
	u32 *mtlb_cpu;
	dma_addr_t mtlb_dma;
	/* S(lave) TLB aka second level pagetable */
	u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
	dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};

static struct etnaviv_iommuv2_context *
to_v2_context(struct etnaviv_iommu_context *context)
{
	return container_of(context, struct etnaviv_iommuv2_context, base);
}

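/*
 * Tear down a context: release the address space manager, free all
 * second level pagetables and the master TLB, give back the PTA slot
 * and finally free the context itself.
 */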
static void etnaviv_iommuv2_free(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	int i;

	drm_mm_takedown(&context->mm);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (v2_context->stlb_cpu[i])
			dma_free_wc(context->global->dev, SZ_4K,
				    v2_context->stlb_cpu[i],
				    v2_context->stlb_dma[i]);
	}

	dma_free_wc(context->global->dev, SZ_4K, v2_context->mtlb_cpu,
		    v2_context->mtlb_dma);

	clear_bit(v2_context->id, context->global->v2.pta_alloc);

	vfree(v2_context);
}
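
/*
 * Allocate the second level pagetable for the given master TLB slot on
 * first use, fill it with exception entries and hook it up in the MTLB.
 */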
static int
etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_context *v2_context,
			    int stlb)
{
	if (v2_context->stlb_cpu[stlb])
		return 0;

	v2_context->stlb_cpu[stlb] =
			dma_alloc_wc(v2_context->base.global->dev, SZ_4K,
				     &v2_context->stlb_dma[stlb],
				     GFP_KERNEL);

	if (!v2_context->stlb_cpu[stlb])
		return -ENOMEM;

	memset32(v2_context->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
		 SZ_4K / sizeof(u32));

	v2_context->mtlb_cpu[stlb] =
			v2_context->stlb_dma[stlb] | MMUv2_PTE_PRESENT;

	return 0;
}

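/*
 * Install a single 4K PTE. With 64-bit physical addresses enabled,
 * address bits [39:32] are stored in PTE bits [11:4].
 */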
static int etnaviv_iommuv2_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	int mtlb_entry, stlb_entry, ret;
	u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;

	if (size != SZ_4K)
		return -EINVAL;

	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		entry |= (upper_32_bits(paddr) & 0xff) << 4;

	if (prot & ETNAVIV_PROT_WRITE)
		entry |= MMUv2_PTE_WRITEABLE;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	ret = etnaviv_iommuv2_ensure_stlb(v2_context, mtlb_entry);
	if (ret)
		return ret;

	v2_context->stlb_cpu[mtlb_entry][stlb_entry] = entry;

	return 0;
}

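/*
 * Tear down a single 4K mapping by turning the PTE back into an
 * exception entry.
 */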
static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_context *context,
				    unsigned long iova, size_t size)
{
	struct etnaviv_iommuv2_context *etnaviv_domain = to_v2_context(context);
	int mtlb_entry, stlb_entry;

	if (size != SZ_4K)
		return -EINVAL;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

	return SZ_4K;
}

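/* Size of a pagetable dump: the MTLB page plus one page per present STLB. */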
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	size_t dump_size = SZ_4K;
	int i;

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			dump_size += SZ_4K;

	return dump_size;
}

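/* Copy the MTLB followed by all present STLB pages into the dump buffer. */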
static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *buf)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	int i;

	memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
	buf += SZ_4K;
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
			memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
			buf += SZ_4K;
		}
}

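/*
 * Non-secure restore: point the MMU at the pagetables and the bad page
 * through an FE command stream, then enable it via the MMIO control
 * register.
 */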
static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
					   struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
		return;

	prefetch = etnaviv_buffer_config_mmuv2(gpu,
				(u32)v2_context->mtlb_dma,
				(u32)context->global->bad_page_dma);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}

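/*
 * Secure (kernel) restore: program the PTA base and safe addresses,
 * publish this context's MTLB in its PTA slot, trigger a PTA load
 * through the FE and enable the MMU via the secure control register.
 */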
static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
					struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
		return;

	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
		  lower_32_bits(context->global->v2.pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
		  upper_32_bits(context->global->v2.pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);

	gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
		  lower_32_bits(context->global->bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
		  lower_32_bits(context->global->bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(context->global->bad_page_dma)) |
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(context->global->bad_page_dma)));

	context->global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma |
						       VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;

	/* trigger a PTA load through the FE */
	prefetch = etnaviv_buffer_config_pta(gpu, v2_context->id);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
}

u32 etnaviv_iommuv2_get_mtlb_addr(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);

	return v2_context->mtlb_dma;
}

unsigned short etnaviv_iommuv2_get_pta_id(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);

	return v2_context->id;
}
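
/* Dispatch the restore to the variant matching the GPU security mode. */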
static void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu,
				    struct etnaviv_iommu_context *context)
{
	switch (gpu->sec_mode) {
	case ETNA_SEC_NONE:
		etnaviv_iommuv2_restore_nonsec(gpu, context);
		break;
	case ETNA_SEC_KERNEL:
		etnaviv_iommuv2_restore_sec(gpu, context);
		break;
	default:
		WARN(1, "unhandled GPU security mode\n");
		break;
	}
}

const struct etnaviv_iommu_ops etnaviv_iommuv2_ops = {
	.free = etnaviv_iommuv2_free,
	.map = etnaviv_iommuv2_map,
	.unmap = etnaviv_iommuv2_unmap,
	.dump_size = etnaviv_iommuv2_dump_size,
	.dump = etnaviv_iommuv2_dump,
	.restore = etnaviv_iommuv2_restore,
};

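/*
 * Allocate a new MMUv2 context: reserve a PTA slot under the global lock,
 * allocate the master TLB page, initialise it with exception entries and
 * set up the 4G - 4K GPU virtual address range.
 */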
struct etnaviv_iommu_context *
etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global)
{
	struct etnaviv_iommuv2_context *v2_context;
	struct etnaviv_iommu_context *context;

	v2_context = vzalloc(sizeof(*v2_context));
	if (!v2_context)
		return NULL;

	mutex_lock(&global->lock);
	v2_context->id = find_first_zero_bit(global->v2.pta_alloc,
					     ETNAVIV_PTA_ENTRIES);
	if (v2_context->id < ETNAVIV_PTA_ENTRIES) {
		set_bit(v2_context->id, global->v2.pta_alloc);
	} else {
		mutex_unlock(&global->lock);
		goto out_free;
	}
	mutex_unlock(&global->lock);

	v2_context->mtlb_cpu = dma_alloc_wc(global->dev, SZ_4K,
					    &v2_context->mtlb_dma, GFP_KERNEL);
	if (!v2_context->mtlb_cpu)
		goto out_free_id;

	memset32(v2_context->mtlb_cpu, MMUv2_PTE_EXCEPTION,
		 MMUv2_MAX_STLB_ENTRIES);

	global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma;

	context = &v2_context->base;
	context->global = global;
	kref_init(&context->refcount);
	mutex_init(&context->lock);
	INIT_LIST_HEAD(&context->mappings);
	drm_mm_init(&context->mm, SZ_4K, (u64)SZ_1G * 4 - SZ_4K);

	return context;

out_free_id:
	clear_bit(v2_context->id, global->v2.pta_alloc);
out_free:
	vfree(v2_context);
	return NULL;
}