// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

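/*
 * Tear down a virtually contiguous range of MMU mappings, one 4K page at a
 * time, using the unmap callback of the owning global MMU instance.
 */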
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
				  unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = context->global->ops->unmap(context, iova,
							    pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}

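/*
 * Map a physically contiguous range into the GPU address space in 4K steps.
 * On failure, any pages mapped so far are unmapped again before returning.
 */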
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = context->global->ops->map(context, iova, paddr, pgsize,
						prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);

	return ret;
}

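/*
 * Map all entries of a DMA-mapped scatterlist to consecutive GPU addresses
 * starting at @iova. A failure part way through unmaps everything mapped so
 * far, so the operation is all-or-nothing.
 */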
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i;
	int ret;

	if (!context || !sgt)
		return -EINVAL;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = etnaviv_context_map(context, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	etnaviv_context_unmap(context, iova, da - iova);
	return ret;
}

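/*
 * Unmap a scatterlist that was previously mapped with etnaviv_iommu_map(),
 * walking the entries in the same order to cover the same address range.
 */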
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_context_unmap(context, da, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}
}

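/*
 * Drop a GEM object's mapping from the MMU context: unmap its pages and
 * release the drm_mm node that held the address range. Callers must hold
 * context->lock.
 */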
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
					 struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	lockdep_assert_held(&context->lock);

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

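/*
 * Find a free address range of @size bytes in the context's address space.
 * If the allocation fails with -ENOSPC, unused (unpinned) mappings are
 * evicted via the drm_mm scan API and the allocation is retried.
 */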
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* If this vram node has not been used, skip it. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent mapping_get from finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(context, m);
			etnaviv_iommu_context_put(m->context);
			m->context = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed. Retry the allocation one more time.
		 */
	}

	return ret;
}

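/*
 * Reserve exactly the range [va, va + size) in the context's address space
 * for a caller-requested GPU virtual address.
 */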
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
				      struct drm_mm_node *node, size_t size, u64 va)
{
	lockdep_assert_held(&context->lock);

	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}

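/*
 * Create a GPU mapping for a GEM object. On MMUv1, contiguous buffers that
 * fit into the 2 GiB linear window are used directly without page tables;
 * otherwise an address range is allocated (at @va if non-zero) and the
 * object's scatterlist is mapped into it.
 */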
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->base.size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	context->flush_seq++;
unlock:
	mutex_unlock(&context->lock);

	return ret;
}

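/*
 * Tear down a GEM object's GPU mapping and drop it from the context's
 * mapping list. Unmapping a mapping that is still in use triggers a warning.
 */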
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
			     struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	context->flush_seq++;
	mutex_unlock(&context->lock);
}

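/*
 * Final kref release callback: unmap the command buffer suballocation and
 * hand the context back to the version-specific free routine.
 */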
static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);

	context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}

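/*
 * Allocate a per-process MMU context for the given global MMU instance and
 * map the shared command buffer suballocation into it. On MMUv1 the command
 * buffer must stay inside the linear window, so the resulting mapping is
 * sanity checked.
 */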
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}

	return ctx;

out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
	return NULL;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}

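/*
 * Map the command buffer suballocation into a context, or just bump the
 * reference count if it is already mapped. On MMUv1 no page table entry is
 * created; the GPU address is derived from the linear window instead.
 */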
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}

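/*
 * Drop one reference on the command buffer suballocation mapping and unmap
 * it once the last user is gone. MMUv1 never created a page table entry, so
 * there is nothing to unmap in that case.
 */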
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
				   struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}

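/*
 * Set up the per-device global MMU state: detect the MMU version from the
 * GPU feature bits, allocate the shared bad page (and the PTA for MMUv2),
 * and select the matching page table ops. Further GPU cores on the same
 * device just take a reference, provided their MMU version matches.
 */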
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
						  &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}

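/*
 * Drop one GPU core's reference on the global MMU state and free the shared
 * DMA allocations once the last user is gone.
 */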
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}