Lines matching refs: msm_obj — each entry is the source line number, the matched line, and the enclosing function ("local" marks a local declaration, "argument" a function parameter).
25 struct msm_gem_object *msm_obj = to_msm_bo(obj); in physaddr() local
27 return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) + in physaddr()
33 struct msm_gem_object *msm_obj = to_msm_bo(obj); in use_pages() local
34 return !msm_obj->vram_node; in use_pages()
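The two helpers above choose between the VRAM carveout and shmem backing: an object uses shmem pages exactly when msm_obj->vram_node is unset, and physaddr() converts the drm_mm node's page-granular start into a byte address. A minimal sketch of the test, assuming only the fields visible in the matches (the term added after the shift in physaddr() is cut off by the search; it is the carveout's base address):

    /* Sketch: vram_node doubles as the "lives in the VRAM carveout"
     * flag, so its absence means the object is backed by shmem pages.
     */
    static bool example_use_pages(struct msm_gem_object *msm_obj)
    {
            return !msm_obj->vram_node;
    }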
51 static void sync_for_device(struct msm_gem_object *msm_obj) in sync_for_device() argument
53 struct device *dev = msm_obj->base.dev->dev; in sync_for_device()
56 dma_sync_sg_for_device(dev, msm_obj->sgt->sgl, in sync_for_device()
57 msm_obj->sgt->nents, DMA_BIDIRECTIONAL); in sync_for_device()
59 dma_map_sg(dev, msm_obj->sgt->sgl, in sync_for_device()
60 msm_obj->sgt->nents, DMA_BIDIRECTIONAL); in sync_for_device()
64 static void sync_for_cpu(struct msm_gem_object *msm_obj) in sync_for_cpu() argument
66 struct device *dev = msm_obj->base.dev->dev; in sync_for_cpu()
69 dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl, in sync_for_cpu()
70 msm_obj->sgt->nents, DMA_BIDIRECTIONAL); in sync_for_cpu()
72 dma_unmap_sg(dev, msm_obj->sgt->sgl, in sync_for_cpu()
73 msm_obj->sgt->nents, DMA_BIDIRECTIONAL); in sync_for_cpu()
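sync_for_device() and sync_for_cpu() hand the buffer's scatterlist between CPU and device ownership via the streaming-DMA API; the condition that selects the sync_sg variant over map/unmap_sg is not shown by the search. An illustrative (hypothetical) caller, assuming msm_obj->sgt has already been populated by get_pages():

    /* Illustrative only: bracket a device access with the two helpers.
     * DMA_BIDIRECTIONAL matches the calls in the listing.
     */
    static void example_device_access(struct msm_gem_object *msm_obj)
    {
            sync_for_device(msm_obj);       /* CPU -> device handoff */
            /* ... device reads/writes the buffer here ... */
            sync_for_cpu(msm_obj);          /* device -> CPU handoff */
    }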
80 struct msm_gem_object *msm_obj = to_msm_bo(obj); in get_pages_vram() local
91 ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages); in get_pages_vram()
109 struct msm_gem_object *msm_obj = to_msm_bo(obj); in get_pages() local
111 if (!msm_obj->pages) { in get_pages()
127 msm_obj->pages = p; in get_pages()
129 msm_obj->sgt = drm_prime_pages_to_sg(p, npages); in get_pages()
130 if (IS_ERR(msm_obj->sgt)) { in get_pages()
131 void *ptr = ERR_CAST(msm_obj->sgt); in get_pages()
134 msm_obj->sgt = NULL; in get_pages()
141 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) in get_pages()
142 sync_for_device(msm_obj); in get_pages()
145 return msm_obj->pages; in get_pages()
150 struct msm_gem_object *msm_obj = to_msm_bo(obj); in put_pages_vram() local
154 drm_mm_remove_node(msm_obj->vram_node); in put_pages_vram()
157 kvfree(msm_obj->pages); in put_pages_vram()
162 struct msm_gem_object *msm_obj = to_msm_bo(obj); in put_pages() local
164 if (msm_obj->pages) { in put_pages()
165 if (msm_obj->sgt) { in put_pages()
170 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) in put_pages()
171 sync_for_cpu(msm_obj); in put_pages()
173 sg_free_table(msm_obj->sgt); in put_pages()
174 kfree(msm_obj->sgt); in put_pages()
178 drm_gem_put_pages(obj, msm_obj->pages, true, false); in put_pages()
182 msm_obj->pages = NULL; in put_pages()
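get_pages() and put_pages() pair page allocation with sg-table construction and teardown. A condensed sketch of the shmem path, using only the DRM helpers visible in the matches (the VRAM path via get_pages_vram()/put_pages_vram() and the cache maintenance are omitted):

    /* Sketch: drm_gem_get_pages() pins the object's shmem pages and
     * drm_prime_pages_to_sg() builds the scatterlist that the DMA
     * helpers above operate on.
     */
    static int example_get_pages(struct drm_gem_object *obj)
    {
            struct msm_gem_object *msm_obj = to_msm_bo(obj);
            int npages = obj->size >> PAGE_SHIFT;
            struct page **p;

            p = drm_gem_get_pages(obj);
            if (IS_ERR(p))
                    return PTR_ERR(p);
            msm_obj->pages = p;

            msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
            if (IS_ERR(msm_obj->sgt)) {
                    int ret = PTR_ERR(msm_obj->sgt);

                    msm_obj->sgt = NULL;
                    drm_gem_put_pages(obj, p, false, false);
                    msm_obj->pages = NULL;
                    return ret;
            }
            return 0;
    }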
188 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_get_pages() local
191 mutex_lock(&msm_obj->lock); in msm_gem_get_pages()
193 if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { in msm_gem_get_pages()
194 mutex_unlock(&msm_obj->lock); in msm_gem_get_pages()
199 mutex_unlock(&msm_obj->lock); in msm_gem_get_pages()
211 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_mmap_obj() local
216 if (msm_obj->flags & MSM_BO_WC) { in msm_gem_mmap_obj()
218 } else if (msm_obj->flags & MSM_BO_UNCACHED) { in msm_gem_mmap_obj()
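msm_gem_mmap_obj() derives the userspace mapping attributes from the BO flags. The matches show only the two flag tests; the assignments below are a reconstruction using the standard pgprot helpers (pgprot_writecombine(), pgprot_noncached(), and vm_get_page_prot() are not in the matches and are assumed):

    /* Sketch: WC buffers map write-combined, MSM_BO_UNCACHED buffers
     * map uncached, everything else keeps the default cached pgprot.
     */
    if (msm_obj->flags & MSM_BO_WC)
            vma->vm_page_prot =
                    pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
    else if (msm_obj->flags & MSM_BO_UNCACHED)
            vma->vm_page_prot =
                    pgprot_noncached(vm_get_page_prot(vma->vm_flags));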
254 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_fault() local
265 err = mutex_lock_interruptible(&msm_obj->lock); in msm_gem_fault()
271 if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { in msm_gem_fault()
272 mutex_unlock(&msm_obj->lock); in msm_gem_fault()
293 mutex_unlock(&msm_obj->lock); in msm_gem_fault()
302 struct msm_gem_object *msm_obj = to_msm_bo(obj); in mmap_offset() local
305 WARN_ON(!mutex_is_locked(&msm_obj->lock)); in mmap_offset()
321 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_mmap_offset() local
323 mutex_lock(&msm_obj->lock); in msm_gem_mmap_offset()
325 mutex_unlock(&msm_obj->lock); in msm_gem_mmap_offset()
332 struct msm_gem_object *msm_obj = to_msm_bo(obj); in add_vma() local
335 WARN_ON(!mutex_is_locked(&msm_obj->lock)); in add_vma()
343 list_add_tail(&vma->list, &msm_obj->vmas); in add_vma()
351 struct msm_gem_object *msm_obj = to_msm_bo(obj); in lookup_vma() local
354 WARN_ON(!mutex_is_locked(&msm_obj->lock)); in lookup_vma()
356 list_for_each_entry(vma, &msm_obj->vmas, list) { in lookup_vma()
377 struct msm_gem_object *msm_obj = to_msm_bo(obj); in put_iova() local
380 WARN_ON(!mutex_is_locked(&msm_obj->lock)); in put_iova()
382 list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { in put_iova()
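add_vma(), lookup_vma(), and put_iova() keep one msm_gem_vma per address space on the msm_obj->vmas list, and every walker asserts that msm_obj->lock is held. A sketch of the lookup; the aspace member on struct msm_gem_vma is an assumption inferred from this usage:

    /* Sketch: find this object's VMA in a given address space, if any.
     * Caller must hold msm_obj->lock, as the WARN_ONs above enforce.
     */
    static struct msm_gem_vma *example_lookup_vma(struct drm_gem_object *obj,
                    struct msm_gem_address_space *aspace)
    {
            struct msm_gem_object *msm_obj = to_msm_bo(obj);
            struct msm_gem_vma *vma;

            list_for_each_entry(vma, &msm_obj->vmas, list) {
                    if (vma->aspace == aspace)      /* assumed field */
                            return vma;
            }
            return NULL;
    }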
394 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_get_iova_locked() local
398 WARN_ON(!mutex_is_locked(&msm_obj->lock)); in msm_gem_get_iova_locked()
421 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_pin_iova() local
426 if (!(msm_obj->flags & MSM_BO_GPU_READONLY)) in msm_gem_pin_iova()
429 WARN_ON(!mutex_is_locked(&msm_obj->lock)); in msm_gem_pin_iova()
431 if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) in msm_gem_pin_iova()
443 msm_obj->sgt, obj->size >> PAGE_SHIFT); in msm_gem_pin_iova()
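msm_gem_pin_iova() converts BO flags into IOMMU permissions before mapping the scatterlist: read access is always granted, write access only when the buffer is not MSM_BO_GPU_READONLY. A sketch of that step; the IOMMU_READ/IOMMU_WRITE flags and the msm_gem_map_vma() signature are assumptions inferred from the calls at source lines 426 and 443:

    /* Sketch: GPU-readonly BOs get a read-only IOMMU mapping. */
    int prot = IOMMU_READ;

    if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
            prot |= IOMMU_WRITE;

    ret = msm_gem_map_vma(aspace, vma, prot,
                    msm_obj->sgt, obj->size >> PAGE_SHIFT);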
450 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_get_and_pin_iova() local
454 mutex_lock(&msm_obj->lock); in msm_gem_get_and_pin_iova()
464 mutex_unlock(&msm_obj->lock); in msm_gem_get_and_pin_iova()
475 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_get_iova() local
478 mutex_lock(&msm_obj->lock); in msm_gem_get_iova()
480 mutex_unlock(&msm_obj->lock); in msm_gem_get_iova()
491 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_iova() local
494 mutex_lock(&msm_obj->lock); in msm_gem_iova()
496 mutex_unlock(&msm_obj->lock); in msm_gem_iova()
510 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_unpin_iova() local
513 mutex_lock(&msm_obj->lock); in msm_gem_unpin_iova()
519 mutex_unlock(&msm_obj->lock); in msm_gem_unpin_iova()
554 struct msm_gem_object *msm_obj = to_msm_bo(obj); in get_vaddr() local
557 mutex_lock(&msm_obj->lock); in get_vaddr()
559 if (WARN_ON(msm_obj->madv > madv)) { in get_vaddr()
561 msm_obj->madv, madv); in get_vaddr()
562 mutex_unlock(&msm_obj->lock); in get_vaddr()
572 msm_obj->vmap_count++; in get_vaddr()
574 if (!msm_obj->vaddr) { in get_vaddr()
580 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, in get_vaddr()
582 if (msm_obj->vaddr == NULL) { in get_vaddr()
588 mutex_unlock(&msm_obj->lock); in get_vaddr()
589 return msm_obj->vaddr; in get_vaddr()
592 msm_obj->vmap_count--; in get_vaddr()
593 mutex_unlock(&msm_obj->lock); in get_vaddr()
615 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_put_vaddr() local
617 mutex_lock(&msm_obj->lock); in msm_gem_put_vaddr()
618 WARN_ON(msm_obj->vmap_count < 1); in msm_gem_put_vaddr()
619 msm_obj->vmap_count--; in msm_gem_put_vaddr()
620 mutex_unlock(&msm_obj->lock); in msm_gem_put_vaddr()
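get_vaddr() and msm_gem_put_vaddr() implement a lazily created kernel mapping, reference-counted by vmap_count under msm_obj->lock. A sketch of the lazy-map step; the VM_MAP flag and the write-combined PAGE_KERNEL pgprot are assumptions (the search shows only vmap()'s first two arguments), and error handling for get_pages() is trimmed:

    /* Sketch: map on first use, cache the mapping for later callers.
     * vmap_count is bumped first, so a vmap() failure must undo it.
     */
    msm_obj->vmap_count++;

    if (!msm_obj->vaddr) {
            struct page **pages = get_pages(obj);

            msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                            VM_MAP, pgprot_writecombine(PAGE_KERNEL));
            if (msm_obj->vaddr == NULL) {
                    msm_obj->vmap_count--;
                    mutex_unlock(&msm_obj->lock);
                    return ERR_PTR(-ENOMEM);
            }
    }

    mutex_unlock(&msm_obj->lock);
    return msm_obj->vaddr;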
628 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_madvise() local
630 mutex_lock(&msm_obj->lock); in msm_gem_madvise()
634 if (msm_obj->madv != __MSM_MADV_PURGED) in msm_gem_madvise()
635 msm_obj->madv = madv; in msm_gem_madvise()
637 madv = msm_obj->madv; in msm_gem_madvise()
639 mutex_unlock(&msm_obj->lock); in msm_gem_madvise()
647 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_purge() local
650 WARN_ON(!is_purgeable(msm_obj)); in msm_gem_purge()
653 mutex_lock_nested(&msm_obj->lock, subclass); in msm_gem_purge()
661 msm_obj->madv = __MSM_MADV_PURGED; in msm_gem_purge()
676 mutex_unlock(&msm_obj->lock); in msm_gem_purge()
681 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_vunmap_locked() local
683 WARN_ON(!mutex_is_locked(&msm_obj->lock)); in msm_gem_vunmap_locked()
685 if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj))) in msm_gem_vunmap_locked()
688 vunmap(msm_obj->vaddr); in msm_gem_vunmap_locked()
689 msm_obj->vaddr = NULL; in msm_gem_vunmap_locked()
694 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_vunmap() local
696 mutex_lock_nested(&msm_obj->lock, subclass); in msm_gem_vunmap()
698 mutex_unlock(&msm_obj->lock); in msm_gem_vunmap()
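msm_gem_madvise() records userspace's WILLNEED/DONTNEED hint but treats __MSM_MADV_PURGED as sticky: once the shrinker has purged the pages, the state can no longer change, and the return value tells the caller whether the backing store survived. A sketch of that update:

    /* Sketch: update the madvise state under the object lock; returns
     * true while the backing pages are still retained.
     */
    static bool example_madvise(struct drm_gem_object *obj, unsigned madv)
    {
            struct msm_gem_object *msm_obj = to_msm_bo(obj);

            mutex_lock(&msm_obj->lock);

            if (msm_obj->madv != __MSM_MADV_PURGED)
                    msm_obj->madv = madv;   /* purged is terminal */

            madv = msm_obj->madv;
            mutex_unlock(&msm_obj->lock);

            return madv != __MSM_MADV_PURGED;
    }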
739 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_move_to_active() local
740 WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); in msm_gem_move_to_active()
741 msm_obj->gpu = gpu; in msm_gem_move_to_active()
746 list_del_init(&msm_obj->mm_list); in msm_gem_move_to_active()
747 list_add_tail(&msm_obj->mm_list, &gpu->active_list); in msm_gem_move_to_active()
754 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_move_to_inactive() local
758 msm_obj->gpu = NULL; in msm_gem_move_to_inactive()
759 list_del_init(&msm_obj->mm_list); in msm_gem_move_to_inactive()
760 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); in msm_gem_move_to_inactive()
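msm_gem_move_to_active() and msm_gem_move_to_inactive() re-home the single mm_list node between the GPU's active_list and the device-wide inactive_list; list_del_init() leaves the node self-linked between the two steps so it is never dangling. These moves appear to be serialized by the DRM device's struct_mutex rather than msm_obj->lock (an assumption, since the search shows no locking here):

    /* Sketch: retire an object to the inactive list once the GPU is
     * done with it, mirroring the matched list operations above.
     */
    msm_obj->gpu = NULL;
    list_del_init(&msm_obj->mm_list);
    list_add_tail(&msm_obj->mm_list, &priv->inactive_list);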
801 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_describe() local
809 mutex_lock(&msm_obj->lock); in msm_gem_describe()
811 switch (msm_obj->madv) { in msm_gem_describe()
825 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', in msm_gem_describe()
827 off, msm_obj->vaddr); in msm_gem_describe()
829 seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name); in msm_gem_describe()
831 if (!list_empty(&msm_obj->vmas)) { in msm_gem_describe()
835 list_for_each_entry(vma, &msm_obj->vmas, list) in msm_gem_describe()
860 mutex_unlock(&msm_obj->lock); in msm_gem_describe()
865 struct msm_gem_object *msm_obj; in msm_gem_describe_objects() local
870 list_for_each_entry(msm_obj, list, mm_list) { in msm_gem_describe_objects()
871 struct drm_gem_object *obj = &msm_obj->base; in msm_gem_describe_objects()
885 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_free_object() local
889 if (llist_add(&msm_obj->freed, &priv->free_list)) in msm_gem_free_object()
893 static void free_object(struct msm_gem_object *msm_obj) in free_object() argument
895 struct drm_gem_object *obj = &msm_obj->base; in free_object()
901 WARN_ON(is_active(msm_obj)); in free_object()
903 list_del(&msm_obj->mm_list); in free_object()
905 mutex_lock(&msm_obj->lock); in free_object()
910 if (msm_obj->vaddr) in free_object()
911 dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr); in free_object()
916 if (msm_obj->pages) in free_object()
917 kvfree(msm_obj->pages); in free_object()
919 drm_prime_gem_destroy(obj, msm_obj->sgt); in free_object()
927 mutex_unlock(&msm_obj->lock); in free_object()
928 kfree(msm_obj); in free_object()
937 struct msm_gem_object *msm_obj, *next; in msm_gem_free_work() local
943 llist_for_each_entry_safe(msm_obj, next, in msm_gem_free_work()
945 free_object(msm_obj); in msm_gem_free_work()
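msm_gem_free_object() defers the actual teardown: the object is pushed onto a lock-free llist and reaped later by msm_gem_free_work(). llist_add() returns true only on the empty-to-nonempty transition, so the worker is scheduled exactly once per batch. A sketch of both sides; priv->wq and priv->free_work as the scheduling target are assumptions:

    /* Producer side: queue the object and kick the worker only when
     * the free list was previously empty.
     */
    if (llist_add(&msm_obj->freed, &priv->free_list))
            queue_work(priv->wq, &priv->free_work);

    /* Consumer side, in the worker: detach the whole batch at once,
     * then walk it with the _safe iterator since entries are freed
     * as we go.
     */
    struct llist_node *freed = llist_del_all(&priv->free_list);
    struct msm_gem_object *msm_obj, *next;

    llist_for_each_entry_safe(msm_obj, next, freed, freed)
            free_object(msm_obj);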
984 struct msm_gem_object *msm_obj; in msm_gem_new_impl() local
997 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL); in msm_gem_new_impl()
998 if (!msm_obj) in msm_gem_new_impl()
1001 mutex_init(&msm_obj->lock); in msm_gem_new_impl()
1003 msm_obj->flags = flags; in msm_gem_new_impl()
1004 msm_obj->madv = MSM_MADV_WILLNEED; in msm_gem_new_impl()
1006 INIT_LIST_HEAD(&msm_obj->submit_entry); in msm_gem_new_impl()
1007 INIT_LIST_HEAD(&msm_obj->vmas); in msm_gem_new_impl()
1011 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); in msm_gem_new_impl()
1014 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); in msm_gem_new_impl()
1018 *obj = &msm_obj->base; in msm_gem_new_impl()
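msm_gem_new_impl() zero-allocates the wrapper object and establishes its invariants: the per-object mutex, the caller's flags, an initial MSM_MADV_WILLNEED state, empty submit/vma lists, and a starting position on the inactive list. A condensed sketch using only the initialisation steps visible above (size and funcs setup are omitted):

    /* Sketch of object construction as seen in msm_gem_new_impl(). */
    static struct msm_gem_object *example_new(struct msm_drm_private *priv,
                    uint32_t flags)
    {
            struct msm_gem_object *msm_obj;

            msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
            if (!msm_obj)
                    return NULL;

            mutex_init(&msm_obj->lock);
            msm_obj->flags = flags;
            msm_obj->madv = MSM_MADV_WILLNEED;

            INIT_LIST_HEAD(&msm_obj->submit_entry);
            INIT_LIST_HEAD(&msm_obj->vmas);
            list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

            return msm_obj;
    }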
1054 struct msm_gem_object *msm_obj = to_msm_bo(obj); in _msm_gem_new() local
1056 mutex_lock(&msm_obj->lock); in _msm_gem_new()
1059 mutex_unlock(&msm_obj->lock); in _msm_gem_new()
1111 struct msm_gem_object *msm_obj; in msm_gem_import() local
1132 msm_obj = to_msm_bo(obj); in msm_gem_import()
1133 mutex_lock(&msm_obj->lock); in msm_gem_import()
1134 msm_obj->sgt = sgt; in msm_gem_import()
1135 msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); in msm_gem_import()
1136 if (!msm_obj->pages) { in msm_gem_import()
1137 mutex_unlock(&msm_obj->lock); in msm_gem_import()
1142 ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages); in msm_gem_import()
1144 mutex_unlock(&msm_obj->lock); in msm_gem_import()
1148 mutex_unlock(&msm_obj->lock); in msm_gem_import()
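msm_gem_import() adopts an externally produced sg-table and expands it back into a struct page array so imported buffers can flow through the same paths as native ones. The NULL third argument to drm_prime_sg_to_page_addr_arrays() skips collecting per-page DMA addresses:

    /* Sketch of the import step; error unwinding is trimmed. */
    msm_obj->sgt = sgt;
    msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *),
                    GFP_KERNEL);
    if (!msm_obj->pages)
            return -ENOMEM;

    ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages,
                    NULL, npages);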
1225 struct msm_gem_object *msm_obj = to_msm_bo(bo); in msm_gem_object_set_name() local
1232 vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap); in msm_gem_object_set_name()
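msm_gem_object_set_name() is a varargs wrapper that formats into the fixed-size msm_obj->name buffer shown in the debugfs output at source line 829; vsnprintf() guarantees the buffer is never overrun. A sketch of the usual va_list pattern around the matched call (the va_start()/va_end() pair is implied, not shown by the search):

    /* Sketch: standard varargs-to-vsnprintf naming helper. */
    void example_set_name(struct drm_gem_object *bo, const char *fmt, ...)
    {
            struct msm_gem_object *msm_obj = to_msm_bo(bo);
            va_list ap;

            va_start(ap, fmt);
            vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
            va_end(ap);
    }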