Lines Matching refs: msm_obj

A cross-reference listing of every source line that references msm_obj; the functions shown (get_pages, msm_gem_pin_iova, msm_gem_free_object, and so on) appear to come from the MSM DRM GEM object implementation, drivers/gpu/drm/msm/msm_gem.c. Each entry gives the source line number, the matching code, the enclosing function, and whether msm_obj is a local or an argument there.

26 struct msm_gem_object *msm_obj = to_msm_bo(obj); in physaddr() local
28 return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) + in physaddr()
34 struct msm_gem_object *msm_obj = to_msm_bo(obj); in use_pages() local
35 return !msm_obj->vram_node; in use_pages()
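These two helpers encode the object's backing mode: a buffer allocated from the VRAM carveout owns a drm_mm node (vram_node), so physaddr() turns the node's page offset into a physical address, while use_pages() is true exactly when there is no vram_node and the object is shmem-backed. A minimal sketch reconstructed from the lines above (the priv->vram.paddr carveout base is an assumption from the surrounding driver):

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_drm_private *priv = obj->dev->dev_private;

        /* vram_node->start counts pages; shift and add the carveout base */
        return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
                        priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        /* no carveout node means the object is shmem-backed */
        return !msm_obj->vram_node;
}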
52 static void sync_for_device(struct msm_gem_object *msm_obj) in sync_for_device() argument
54 struct device *dev = msm_obj->base.dev->dev; in sync_for_device()
56 dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); in sync_for_device()
59 static void sync_for_cpu(struct msm_gem_object *msm_obj) in sync_for_cpu() argument
61 struct device *dev = msm_obj->base.dev->dev; in sync_for_cpu()
63 dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); in sync_for_cpu()
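For write-combined and uncached buffers the driver leans on the streaming-DMA API purely for cache maintenance: dma_map_sgtable() cleans the CPU caches for every page in the sg_table before the GPU touches the memory, and dma_unmap_sgtable() is the matching release before the pages go back to the CPU or the allocator. A sketch of the pair as it appears above (note dma_map_sgtable() returns an error code that the listing shows being ignored):

static void sync_for_device(struct msm_gem_object *msm_obj)
{
        struct device *dev = msm_obj->base.dev->dev;

        /* used for its cache-maintenance side effect only */
        dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
        struct device *dev = msm_obj->base.dev->dev;

        dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}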
69 struct msm_gem_object *msm_obj = to_msm_bo(obj); in get_pages_vram() local
80 ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages); in get_pages_vram()
98 struct msm_gem_object *msm_obj = to_msm_bo(obj); in get_pages() local
100 if (!msm_obj->pages) { in get_pages()
116 msm_obj->pages = p; in get_pages()
118 msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages); in get_pages()
119 if (IS_ERR(msm_obj->sgt)) { in get_pages()
120 void *ptr = ERR_CAST(msm_obj->sgt); in get_pages()
123 msm_obj->sgt = NULL; in get_pages()
130 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) in get_pages()
131 sync_for_device(msm_obj); in get_pages()
134 return msm_obj->pages; in get_pages()
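get_pages() is the lazy allocation point: the first caller populates msm_obj->pages, either through drm_gem_get_pages() for shmem objects or through the drm_mm carveout allocator in get_pages_vram(), builds an sg_table with drm_prime_pages_to_sg(), and flushes WC/uncached buffers out of the CPU caches. A condensed sketch of the control flow visible above (error reporting trimmed; the use_pages() branch follows from the helper earlier in the file):

static struct page **get_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (!msm_obj->pages) {
                int npages = obj->size >> PAGE_SHIFT;
                struct page **p = use_pages(obj) ?
                        drm_gem_get_pages(obj) :
                        get_pages_vram(obj, npages);

                if (IS_ERR(p))
                        return p;

                msm_obj->pages = p;

                msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
                if (IS_ERR(msm_obj->sgt)) {
                        void *ptr = ERR_CAST(msm_obj->sgt);

                        msm_obj->sgt = NULL;
                        return ptr;
                }

                /* WC/uncached buffers must be cleaned out of the CPU
                 * caches before the GPU can safely see them */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        sync_for_device(msm_obj);
        }

        return msm_obj->pages;
}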
139 struct msm_gem_object *msm_obj = to_msm_bo(obj); in put_pages_vram() local
143 drm_mm_remove_node(msm_obj->vram_node); in put_pages_vram()
146 kvfree(msm_obj->pages); in put_pages_vram()
151 struct msm_gem_object *msm_obj = to_msm_bo(obj); in put_pages() local
153 if (msm_obj->pages) { in put_pages()
154 if (msm_obj->sgt) { in put_pages()
159 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) in put_pages()
160 sync_for_cpu(msm_obj); in put_pages()
162 sg_free_table(msm_obj->sgt); in put_pages()
163 kfree(msm_obj->sgt); in put_pages()
167 drm_gem_put_pages(obj, msm_obj->pages, true, false); in put_pages()
171 msm_obj->pages = NULL; in put_pages()
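put_pages() unwinds in the opposite order, and the ordering matters: the CPU-side sync runs while the sg_table still describes the pages, then the table is freed, and only then are the pages released, marked dirty but not accessed per the drm_gem_put_pages() arguments above. A condensed sketch:

        if (msm_obj->pages) {
                if (msm_obj->sgt) {
                        /* undo the cache maintenance before the sgt is gone */
                        if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                                sync_for_cpu(msm_obj);

                        sg_free_table(msm_obj->sgt);
                        kfree(msm_obj->sgt);
                }

                if (use_pages(obj))
                        drm_gem_put_pages(obj, msm_obj->pages, true, false);
                else
                        put_pages_vram(obj);

                msm_obj->pages = NULL;
        }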
177 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_get_pages() local
180 mutex_lock(&msm_obj->lock); in msm_gem_get_pages()
182 if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { in msm_gem_get_pages()
183 mutex_unlock(&msm_obj->lock); in msm_gem_get_pages()
188 mutex_unlock(&msm_obj->lock); in msm_gem_get_pages()
200 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_mmap_obj() local
205 if (msm_obj->flags & MSM_BO_WC) { in msm_gem_mmap_obj()
207 } else if (msm_obj->flags & MSM_BO_UNCACHED) { in msm_gem_mmap_obj()
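The flag tests above pick the userspace mapping attributes: write-combined for MSM_BO_WC, fully uncached for MSM_BO_UNCACHED, and cached otherwise. A sketch of the idiom, assuming the usual vm_page_prot plumbing of a GEM mmap handler (the cached fallback in the real handler additionally repoints the VMA at the object's shmem file, which the listing does not show):

        if (msm_obj->flags & MSM_BO_WC) {
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        } else if (msm_obj->flags & MSM_BO_UNCACHED) {
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));
        } else {
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        }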
243 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_fault() local
254 err = mutex_lock_interruptible(&msm_obj->lock); in msm_gem_fault()
260 if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { in msm_gem_fault()
261 mutex_unlock(&msm_obj->lock); in msm_gem_fault()
282 mutex_unlock(&msm_obj->lock); in msm_gem_fault()
291 struct msm_gem_object *msm_obj = to_msm_bo(obj); in mmap_offset() local
294 WARN_ON(!mutex_is_locked(&msm_obj->lock)); in mmap_offset()
310 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_mmap_offset() local
312 mutex_lock(&msm_obj->lock); in msm_gem_mmap_offset()
314 mutex_unlock(&msm_obj->lock); in msm_gem_mmap_offset()
321 struct msm_gem_object *msm_obj = to_msm_bo(obj); in add_vma() local
324 WARN_ON(!mutex_is_locked(&msm_obj->lock)); in add_vma()
332 list_add_tail(&vma->list, &msm_obj->vmas); in add_vma()
340 struct msm_gem_object *msm_obj = to_msm_bo(obj); in lookup_vma() local
343 WARN_ON(!mutex_is_locked(&msm_obj->lock)); in lookup_vma()
345 list_for_each_entry(vma, &msm_obj->vmas, list) { in lookup_vma()
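Every mapping of the object into an IOMMU address space is tracked as an entry on the per-object msm_obj->vmas list; add_vma() appends under the object lock and lookup_vma() finds the entry for a given address space. A sketch of the lookup implied by the lines above (the aspace back-pointer on the vma is an assumption from the rest of the driver):

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;

        WARN_ON(!mutex_is_locked(&msm_obj->lock));

        list_for_each_entry(vma, &msm_obj->vmas, list) {
                if (vma->aspace == aspace)
                        return vma;
        }

        return NULL;
}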
366 struct msm_gem_object *msm_obj = to_msm_bo(obj); in put_iova() local
369 WARN_ON(!mutex_is_locked(&msm_obj->lock)); in put_iova()
371 list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { in put_iova()
384 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_get_iova_locked() local
388 WARN_ON(!mutex_is_locked(&msm_obj->lock)); in msm_gem_get_iova_locked()
412 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_pin_iova() local
417 if (!(msm_obj->flags & MSM_BO_GPU_READONLY)) in msm_gem_pin_iova()
420 if (msm_obj->flags & MSM_BO_MAP_PRIV) in msm_gem_pin_iova()
423 WARN_ON(!mutex_is_locked(&msm_obj->lock)); in msm_gem_pin_iova()
425 if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) in msm_gem_pin_iova()
437 msm_obj->sgt, obj->size >> PAGE_SHIFT); in msm_gem_pin_iova()
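msm_gem_pin_iova() turns BO flags into IOMMU protection bits before mapping the sg_table: read access is always granted, write is withheld for MSM_BO_GPU_READONLY buffers, and MSM_BO_MAP_PRIV adds the privileged attribute. A sketch reconstructed around the lines above (msm_gem_map_vma() is the driver's own mapping helper; its exact signature is inferred from the trailing arguments shown):

        int prot = IOMMU_READ;

        if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
                prot |= IOMMU_WRITE;

        if (msm_obj->flags & MSM_BO_MAP_PRIV)
                prot |= IOMMU_PRIV;

        WARN_ON(!mutex_is_locked(&msm_obj->lock));

        if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
                return -EBUSY;

        pages = get_pages(obj);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        return msm_gem_map_vma(aspace, vma, prot,
                        msm_obj->sgt, obj->size >> PAGE_SHIFT);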
448 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_get_and_pin_iova_range() local
452 mutex_lock(&msm_obj->lock); in msm_gem_get_and_pin_iova_range()
463 mutex_unlock(&msm_obj->lock); in msm_gem_get_and_pin_iova_range()
481 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_get_iova() local
484 mutex_lock(&msm_obj->lock); in msm_gem_get_iova()
486 mutex_unlock(&msm_obj->lock); in msm_gem_get_iova()
497 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_iova() local
500 mutex_lock(&msm_obj->lock); in msm_gem_iova()
502 mutex_unlock(&msm_obj->lock); in msm_gem_iova()
516 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_unpin_iova() local
519 mutex_lock(&msm_obj->lock); in msm_gem_unpin_iova()
525 mutex_unlock(&msm_obj->lock); in msm_gem_unpin_iova()
560 struct msm_gem_object *msm_obj = to_msm_bo(obj); in get_vaddr() local
566 mutex_lock(&msm_obj->lock); in get_vaddr()
568 if (WARN_ON(msm_obj->madv > madv)) { in get_vaddr()
570 msm_obj->madv, madv); in get_vaddr()
571 mutex_unlock(&msm_obj->lock); in get_vaddr()
581 msm_obj->vmap_count++; in get_vaddr()
583 if (!msm_obj->vaddr) { in get_vaddr()
589 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, in get_vaddr()
591 if (msm_obj->vaddr == NULL) { in get_vaddr()
597 mutex_unlock(&msm_obj->lock); in get_vaddr()
598 return msm_obj->vaddr; in get_vaddr()
601 msm_obj->vmap_count--; in get_vaddr()
602 mutex_unlock(&msm_obj->lock); in get_vaddr()
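get_vaddr() keeps a counted kernel mapping: vmap_count is bumped before anything else so a concurrent shrinker sees the object as busy, the actual vmap() runs only on the first call, and the decrement at the end of the listing is the failure rollback. A condensed sketch (the pgprot argument of vmap() is truncated in the listing; a write-combined kernel mapping is assumed):

        mutex_lock(&msm_obj->lock);

        if (WARN_ON(msm_obj->madv > madv)) {
                mutex_unlock(&msm_obj->lock);
                return ERR_PTR(-EBUSY);
        }

        /* take the count first so the shrinker cannot vunmap under us */
        msm_obj->vmap_count++;

        if (!msm_obj->vaddr) {
                struct page **pages = get_pages(obj);

                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto fail;
                }

                msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
                if (msm_obj->vaddr == NULL) {
                        ret = -ENOMEM;
                        goto fail;
                }
        }

        mutex_unlock(&msm_obj->lock);
        return msm_obj->vaddr;

fail:
        msm_obj->vmap_count--;
        mutex_unlock(&msm_obj->lock);
        return ERR_PTR(ret);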
624 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_put_vaddr() local
626 mutex_lock(&msm_obj->lock); in msm_gem_put_vaddr()
627 WARN_ON(msm_obj->vmap_count < 1); in msm_gem_put_vaddr()
628 msm_obj->vmap_count--; in msm_gem_put_vaddr()
629 mutex_unlock(&msm_obj->lock); in msm_gem_put_vaddr()
637 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_madvise() local
639 mutex_lock(&msm_obj->lock); in msm_gem_madvise()
643 if (msm_obj->madv != __MSM_MADV_PURGED) in msm_gem_madvise()
644 msm_obj->madv = madv; in msm_gem_madvise()
646 madv = msm_obj->madv; in msm_gem_madvise()
648 mutex_unlock(&msm_obj->lock); in msm_gem_madvise()
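msm_gem_madvise() is a small state machine: userspace may flip between WILLNEED and DONTNEED, but once the shrinker has set __MSM_MADV_PURGED the stored state is never overwritten again, and the value read back under the lock tells the caller whether the backing pages still exist. A sketch (the boolean return convention is an assumption from the ioctl side):

int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        mutex_lock(&msm_obj->lock);

        /* a purged object stays purged */
        if (msm_obj->madv != __MSM_MADV_PURGED)
                msm_obj->madv = madv;

        madv = msm_obj->madv;

        mutex_unlock(&msm_obj->lock);

        /* true while the backing pages are still retained */
        return (madv != __MSM_MADV_PURGED);
}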
656 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_purge() local
659 WARN_ON(!is_purgeable(msm_obj)); in msm_gem_purge()
662 mutex_lock_nested(&msm_obj->lock, subclass); in msm_gem_purge()
670 msm_obj->madv = __MSM_MADV_PURGED; in msm_gem_purge()
685 mutex_unlock(&msm_obj->lock); in msm_gem_purge()
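msm_gem_purge() is the shrinker's reclaim path. It takes the object lock with mutex_lock_nested() because the shrinker can already hold another object's lock of the same lock class, and lockdep needs the subclass to tell the two apart. Under the lock it tears down iova mappings, the kernel vmap, and the pages, then latches __MSM_MADV_PURGED. A condensed sketch (the shmem truncation at the end is an assumption about how the backing store is actually handed back to the system):

        mutex_lock_nested(&msm_obj->lock, subclass);

        put_iova(obj);
        msm_gem_vunmap_locked(obj);
        put_pages(obj);

        msm_obj->madv = __MSM_MADV_PURGED;

        /* drop the shmem backing pages immediately, we may be in OOM */
        shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

        mutex_unlock(&msm_obj->lock);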
690 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_vunmap_locked() local
692 WARN_ON(!mutex_is_locked(&msm_obj->lock)); in msm_gem_vunmap_locked()
694 if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj))) in msm_gem_vunmap_locked()
697 vunmap(msm_obj->vaddr); in msm_gem_vunmap_locked()
698 msm_obj->vaddr = NULL; in msm_gem_vunmap_locked()
703 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_vunmap() local
705 mutex_lock_nested(&msm_obj->lock, subclass); in msm_gem_vunmap()
707 mutex_unlock(&msm_obj->lock); in msm_gem_vunmap()
747 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_active_get() local
749 WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); in msm_gem_active_get()
751 if (!atomic_fetch_inc(&msm_obj->active_count)) { in msm_gem_active_get()
752 msm_obj->gpu = gpu; in msm_gem_active_get()
753 list_del_init(&msm_obj->mm_list); in msm_gem_active_get()
754 list_add_tail(&msm_obj->mm_list, &gpu->active_list); in msm_gem_active_get()
760 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_active_put() local
765 if (!atomic_dec_return(&msm_obj->active_count)) { in msm_gem_active_put()
766 msm_obj->gpu = NULL; in msm_gem_active_put()
767 list_del_init(&msm_obj->mm_list); in msm_gem_active_put()
768 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); in msm_gem_active_put()
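The active/inactive bookkeeping is a counted list move: atomic_fetch_inc() returns the previous count, so only the 0 to 1 transition migrates the object onto the GPU's active_list, and atomic_dec_return() hitting zero moves it back to the driver-wide inactive_list that the shrinker scans. A sketch of the get side:

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);

        /* only the first reference moves the object between lists */
        if (!atomic_fetch_inc(&msm_obj->active_count)) {
                msm_obj->gpu = gpu;
                list_del_init(&msm_obj->mm_list);
                list_add_tail(&msm_obj->mm_list, &gpu->active_list);
        }
}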
810 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_describe() local
818 mutex_lock(&msm_obj->lock); in msm_gem_describe()
820 switch (msm_obj->madv) { in msm_gem_describe()
834 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', in msm_gem_describe()
836 off, msm_obj->vaddr); in msm_gem_describe()
838 seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name); in msm_gem_describe()
840 if (!list_empty(&msm_obj->vmas)) { in msm_gem_describe()
844 list_for_each_entry(vma, &msm_obj->vmas, list) { in msm_gem_describe()
886 mutex_unlock(&msm_obj->lock); in msm_gem_describe()
891 struct msm_gem_object *msm_obj; in msm_gem_describe_objects() local
896 list_for_each_entry(msm_obj, list, mm_list) { in msm_gem_describe_objects()
897 struct drm_gem_object *obj = &msm_obj->base; in msm_gem_describe_objects()
911 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_free_object() local
915 if (llist_add(&msm_obj->freed, &priv->free_list)) in msm_gem_free_object()
919 static void free_object(struct msm_gem_object *msm_obj) in free_object() argument
921 struct drm_gem_object *obj = &msm_obj->base; in free_object()
927 WARN_ON(is_active(msm_obj)); in free_object()
929 list_del(&msm_obj->mm_list); in free_object()
931 mutex_lock(&msm_obj->lock); in free_object()
936 WARN_ON(msm_obj->vaddr); in free_object()
941 if (msm_obj->pages) in free_object()
942 kvfree(msm_obj->pages); in free_object()
944 drm_prime_gem_destroy(obj, msm_obj->sgt); in free_object()
952 mutex_unlock(&msm_obj->lock); in free_object()
953 kfree(msm_obj); in free_object()
962 struct msm_gem_object *msm_obj, *next; in msm_gem_free_work() local
968 llist_for_each_entry_safe(msm_obj, next, in msm_gem_free_work()
970 free_object(msm_obj); in msm_gem_free_work()
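Destruction is deferred to a workqueue: msm_gem_free_object() pushes the object onto a lock-free llist, and because llist_add() returns true only when the list was previously empty, the work item is scheduled exactly once per batch; msm_gem_free_work() then drains the list and calls free_object() on each entry. A sketch of the producer side (the queue_work() call and the priv->free_work field are assumptions; the listing only shows the llist_add() test):

void msm_gem_free_object(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_drm_private *priv = obj->dev->dev_private;

        /* true means the list was empty: the first entry kicks the worker,
         * later entries just pile on until the worker drains the list */
        if (llist_add(&msm_obj->freed, &priv->free_list))
                queue_work(priv->wq, &priv->free_work);
}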
1007 struct msm_gem_object *msm_obj; in msm_gem_new_impl() local
1020 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL); in msm_gem_new_impl()
1021 if (!msm_obj) in msm_gem_new_impl()
1024 mutex_init(&msm_obj->lock); in msm_gem_new_impl()
1026 msm_obj->flags = flags; in msm_gem_new_impl()
1027 msm_obj->madv = MSM_MADV_WILLNEED; in msm_gem_new_impl()
1029 INIT_LIST_HEAD(&msm_obj->submit_entry); in msm_gem_new_impl()
1030 INIT_LIST_HEAD(&msm_obj->vmas); in msm_gem_new_impl()
1032 *obj = &msm_obj->base; in msm_gem_new_impl()
1041 struct msm_gem_object *msm_obj; in _msm_gem_new() local
1066 msm_obj = to_msm_bo(obj); in _msm_gem_new()
1072 mutex_lock(&msm_obj->lock); in _msm_gem_new()
1075 mutex_unlock(&msm_obj->lock); in _msm_gem_new()
1107 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); in _msm_gem_new()
1110 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); in _msm_gem_new()
1137 struct msm_gem_object *msm_obj; in msm_gem_import() local
1158 msm_obj = to_msm_bo(obj); in msm_gem_import()
1159 mutex_lock(&msm_obj->lock); in msm_gem_import()
1160 msm_obj->sgt = sgt; in msm_gem_import()
1161 msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); in msm_gem_import()
1162 if (!msm_obj->pages) { in msm_gem_import()
1163 mutex_unlock(&msm_obj->lock); in msm_gem_import()
1168 ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages); in msm_gem_import()
1170 mutex_unlock(&msm_obj->lock); in msm_gem_import()
1174 mutex_unlock(&msm_obj->lock); in msm_gem_import()
1177 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); in msm_gem_import()
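msm_gem_import() wraps a dma-buf's existing sg_table so imported buffers look like native ones: it stores the sgt, allocates a page array with kvmalloc_array(), fills it from the sg_table with drm_prime_sg_to_page_addr_arrays(), and finally parks the object on the inactive list. A condensed sketch of the population step shown above (the fail label is an assumption about the surrounding error path):

        msm_obj->sgt = sgt;
        msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *),
                        GFP_KERNEL);
        if (!msm_obj->pages) {
                mutex_unlock(&msm_obj->lock);
                ret = -ENOMEM;
                goto fail;
        }

        ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages,
                        NULL, npages);
        if (ret) {
                mutex_unlock(&msm_obj->lock);
                goto fail;
        }

        mutex_unlock(&msm_obj->lock);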
1256 struct msm_gem_object *msm_obj = to_msm_bo(bo); in msm_gem_object_set_name() local
1263 vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap); in msm_gem_object_set_name()