// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
/* ... */
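/*
 * The two static helpers below appear to be thin page-table walkers: they
 * split a request into page-sized (pgsize, presumably SZ_4K) chunks and feed
 * each chunk to the per-MMU-version backend through context->global->ops.
 */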
/* In etnaviv_context_unmap(): */
	unmapped_page = context->global->ops->unmap(context, iova,
						    pgsize);
	/* ... */
/* In etnaviv_context_map(): */
	if (!IS_ALIGNED(iova | paddr | size, pgsize))
		return -EINVAL;

	while (size) {
		ret = context->global->ops->map(context, iova, paddr, pgsize,
						prot);
		/* ... */
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);
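/*
 * etnaviv_iommu_map()/etnaviv_iommu_unmap() lift the page walkers to
 * scatterlists. Note the offset handling: mapping starts at the page backing
 * the first byte (sg_dma_address() - sg->offset) and the length is grown by
 * the same offset, so partial head pages are still covered.
 */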
/* In etnaviv_iommu_map(): */
	if (!context || !sgt)
		return -EINVAL;

	/* loop helper assumed; older kernels use for_each_sg() here */
	for_each_sgtable_dma_sg(sgt, sg, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;
		/* ... */
	}
	/* ...failure path: roll back everything mapped so far */
	etnaviv_context_unmap(context, iova, da - iova);
/* In etnaviv_iommu_unmap(): */
	size_t bytes = sg_dma_len(sg) + sg->offset;
	/* ... */
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
					 struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	lockdep_assert_held(&context->lock);

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}
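/*
 * etnaviv_iommu_find_iova() below is an address-space allocator with
 * eviction: it first tries a cheap insert at low addresses, and only on
 * -ENOSPC starts a drm_mm eviction scan over the unpinned (use == 0)
 * mappings, reaping enough of them to retry in DRM_MM_INSERT_EVICT mode.
 */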
/* In etnaviv_iommu_find_iova(): */
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;

	lockdep_assert_held(&context->lock);

	while (1) {
		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* out of space: scan for mappings we may evict */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);
		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* nothing evictable: unwind the scan and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/* end the scan; blocks it lets us keep stay untouched */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks to be reaped and drop their context
		 * reference, so nothing else can find this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(context, m);
			etnaviv_iommu_context_put(m->context);
			m->context = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		/* retry with eviction-assisted placement */
		mode = DRM_MM_INSERT_EVICT;
	}

	return ret;
/* In etnaviv_iommu_insert_exact(): */
	lockdep_assert_held(&context->lock);

	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0,
					   va, va + size,
					   DRM_MM_INSERT_LOWEST); /* continuation inferred */
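/*
 * etnaviv_iommu_map_gem() resolves a GEM object to a GPU virtual address,
 * either at a caller-requested va or wherever etnaviv_iommu_find_iova()
 * finds room. On MMUv1 a contiguous buffer that already sits inside the
 * 2 GiB linear window is seemingly used 1:1 (physical address minus
 * memory_base), without touching the page tables at all.
 */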
/* In etnaviv_iommu_map_gem(): */
	struct sg_table *sgt = etnaviv_obj->sgt;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			/* fits in the linear window: use the address 1:1 */
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->base.size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
	/* ... */
	list_add_tail(&mapping->mmu_node, &context->mappings);
	context->flush_seq++;
unlock:
	mutex_unlock(&context->lock);
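/*
 * context->flush_seq is bumped on every mapping change; presumably the
 * submit path compares it against the last flushed value to decide when a
 * TLB flush has to precede the next command stream.
 */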
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
			     struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* if the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	context->flush_seq++;
	mutex_unlock(&context->lock);
}
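/*
 * A mapping must be idle before teardown: the WARN_ON above fires when a
 * still-referenced (use > 0) mapping is unmapped, which would indicate a
 * refcounting bug in the callers.
 */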
/* In etnaviv_iommu_context_free(): */
	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);
	/* ... */
	context->global->ops->free(context);

/* In etnaviv_iommu_context_put(): */
	kref_put(&context->refcount, etnaviv_iommu_context_free);
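/*
 * Context creation below presumably dispatches to a per-version backend
 * constructor (the *_context_alloc() names are inferred, not verbatim),
 * then maps the shared cmdbuf suballocation into the new context. On MMUv1
 * that mapping has to fall inside the low 2 GiB linear aperture, as checked
 * further down.
 */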
/* In etnaviv_iommu_context_init(): */
	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);
	/* ... */
	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}
	/* ... */
out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
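/*
 * etnaviv_iommu_restore() just delegates to the backend; it is presumably
 * what reprograms the GPU's MMU state (page table base and friends) when a
 * different context is switched in.
 */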
/* In etnaviv_iommu_restore(): */
	context->global->ops->restore(gpu, context);
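/*
 * The suballoc va helpers manage the kernel-internal command buffer region.
 * They are use-counted per mapping: only the first get and the last put
 * actually touch the page tables, and on MMUv1 not even those do, since
 * cmdbufs are reached through the linear window instead.
 */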
/* In etnaviv_iommu_get_suballoc_va(): */
	mutex_lock(&context->lock);

	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
				   struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}
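/*
 * Note the asymmetry in the put path: mapping->use is dropped first, and the
 * early return covers both "still in use" and MMUv1, where nothing was ever
 * inserted into the page tables to begin with.
 */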
/* In etnaviv_iommu_dump_size(): */
	return context->global->ops->dump_size(context);

/* In etnaviv_iommu_dump(): */
	context->global->ops->dump(context, buf);
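/*
 * Global MMU state is shared by all GPU cores of the device: the backend
 * ops, the bad page (filled with a 0xdead55aa pattern, presumably a
 * scapegoat target for stray accesses) and the MMUv2 PTA exist only once,
 * so all cores have to agree on one MMU version; init below enforces this
 * with -ENXIO on mismatch.
 */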
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
						  &global->v2.pta_dma,
						  GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}
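/*
 * Teardown mirrors init: only the last core to leave (use hits zero) frees
 * the PTA and bad-page DMA allocations and clears priv->mmu_global.
 */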
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}