Lines Matching refs:bdev (drivers/gpu/drm/ttm/ttm_bo.c)

67 		man = ttm_manager_type(bo->bdev, mem_type);  in ttm_bo_mem_space_debug()
74 struct ttm_device *bdev = bo->bdev; in ttm_bo_del_from_lru() local
78 if (bdev->funcs->del_from_lru_notify) in ttm_bo_del_from_lru()
79 bdev->funcs->del_from_lru_notify(bo); in ttm_bo_del_from_lru()
94 struct ttm_device *bdev = bo->bdev; in ttm_bo_move_to_lru_tail() local
108 man = ttm_manager_type(bdev, mem->mem_type); in ttm_bo_move_to_lru_tail()
111 if (bdev->funcs->del_from_lru_notify) in ttm_bo_move_to_lru_tail()
112 bdev->funcs->del_from_lru_notify(bo); in ttm_bo_move_to_lru_tail()
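
Lines 78-79 and 111-112 are the two call sites of the optional del_from_lru_notify hook: TTM fires it, under bdev->lru_lock, whenever a BO leaves or is re-queued on an LRU list. A minimal sketch of wiring it up; the ttm_device_funcs member is real, the mydrv_* names and bookkeeping are hypothetical:

        #include <drm/ttm/ttm_bo_api.h>
        #include <drm/ttm/ttm_device.h>

        struct mydrv_bo {                       /* hypothetical wrapper */
                struct ttm_buffer_object tbo;
                struct ttm_bo_kmap_obj kmap;    /* used by later sketches */
                atomic_t on_lru;
        };

        static void mydrv_del_from_lru_notify(struct ttm_buffer_object *bo)
        {
                struct mydrv_bo *mbo = container_of(bo, struct mydrv_bo, tbo);

                /* Runs under bdev->lru_lock: must not sleep. */
                atomic_set(&mbo->on_lru, 0);
        }

        static struct ttm_device_funcs mydrv_ttm_funcs = {
                /* .ttm_tt_create, .move, .evict_flags, ... */
                .del_from_lru_notify = mydrv_del_from_lru_notify,
        };
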
142 man = ttm_manager_type(pos->first->bdev, TTM_PL_TT); in ttm_bo_bulk_move_lru_tail()
157 man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM); in ttm_bo_bulk_move_lru_tail()
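
The bulk flush above pairs with ttm_bo_move_to_lru_tail(): a driver queues many reserved BOs into one ttm_lru_bulk_move and the TT and VRAM ranges are then spliced to the LRU tails in a single pass (this is how amdgpu keeps per-VM BOs adjacent on the LRU). Hedged sketch; mydrv_refresh_lru() is hypothetical:

        static void mydrv_refresh_lru(struct ttm_buffer_object *bo)
        {
                struct ttm_lru_bulk_move bulk = {};

                /* Caller holds bo's dma_resv; lru_lock protects the lists. */
                spin_lock(&bo->bdev->lru_lock);
                ttm_bo_move_to_lru_tail(bo, bo->resource, &bulk);
                /* ... more ttm_bo_move_to_lru_tail() calls for related BOs ... */
                ttm_bo_bulk_move_lru_tail(&bulk);
                spin_unlock(&bo->bdev->lru_lock);
        }
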
170 struct ttm_device *bdev = bo->bdev; in ttm_bo_handle_move_mem() local
173 old_man = ttm_manager_type(bdev, bo->resource->mem_type); in ttm_bo_handle_move_mem()
174 new_man = ttm_manager_type(bdev, mem->mem_type); in ttm_bo_handle_move_mem()
191 ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx); in ttm_bo_handle_move_mem()
197 ret = bdev->funcs->move(bo, evict, ctx, mem, hop); in ttm_bo_handle_move_mem()
208 new_man = ttm_manager_type(bdev, bo->resource->mem_type); in ttm_bo_handle_move_mem()
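
Line 197 is the hand-off to the driver: ttm_device_funcs.move performs the actual copy, or fills *hop and returns -EMULTIHOP when the transition needs a bounce through an intermediate placement (bo->ttm is already populated by line 191). A minimal sketch that defers to the generic CPU-copy helper; mydrv_bo_move is hypothetical, ttm_bo_move_memcpy() is the real TTM helper:

        static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
                                 struct ttm_operation_ctx *ctx,
                                 struct ttm_resource *new_mem,
                                 struct ttm_place *hop)
        {
                /* No copy engine in this sketch: plain CPU copy. Hardware
                 * drivers would use their blit path here and reserve *hop
                 * plus -EMULTIHOP for multi-step transitions. */
                return ttm_bo_move_memcpy(bo, ctx, new_mem);
        }
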
225 if (bo->bdev->funcs->delete_mem_notify) in ttm_bo_cleanup_memtype_use()
226 bo->bdev->funcs->delete_mem_notify(bo); in ttm_bo_cleanup_memtype_use()
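
delete_mem_notify is the teardown counterpart of move: it runs while the backing store is being released, so the driver can drop anything that still points at bo->resource. Sketch with a purely illustrative body:

        static void mydrv_delete_mem_notify(struct ttm_buffer_object *bo)
        {
                /* e.g. invalidate GPU page-table entries that still
                 * reference bo->resource before TTM frees it. */
        }
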
251 spin_lock(&bo->bdev->lru_lock); in ttm_bo_individualize_resv()
253 spin_unlock(&bo->bdev->lru_lock); in ttm_bo_individualize_resv()
312 spin_unlock(&bo->bdev->lru_lock); in ttm_bo_cleanup_refs()
322 spin_lock(&bo->bdev->lru_lock); in ttm_bo_cleanup_refs()
332 spin_unlock(&bo->bdev->lru_lock); in ttm_bo_cleanup_refs()
341 spin_unlock(&bo->bdev->lru_lock); in ttm_bo_cleanup_refs()
347 spin_unlock(&bo->bdev->lru_lock); in ttm_bo_cleanup_refs()
362 bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all) in ttm_bo_delayed_delete() argument
369 spin_lock(&bdev->lru_lock); in ttm_bo_delayed_delete()
370 while (!list_empty(&bdev->ddestroy)) { in ttm_bo_delayed_delete()
373 bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object, in ttm_bo_delayed_delete()
380 spin_unlock(&bdev->lru_lock); in ttm_bo_delayed_delete()
383 spin_lock(&bdev->lru_lock); in ttm_bo_delayed_delete()
389 spin_unlock(&bdev->lru_lock); in ttm_bo_delayed_delete()
393 spin_lock(&bdev->lru_lock); in ttm_bo_delayed_delete()
395 list_splice_tail(&removed, &bdev->ddestroy); in ttm_bo_delayed_delete()
396 empty = list_empty(&bdev->ddestroy); in ttm_bo_delayed_delete()
397 spin_unlock(&bdev->lru_lock); in ttm_bo_delayed_delete()
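
ttm_bo_delayed_delete() reports whether bdev->ddestroy drained; the consumer is the ttm_device work item, which reschedules itself while unsignaled fences keep BOs on the list. Roughly, from ttm_device.c of the same era:

        static void ttm_device_delayed_workqueue(struct work_struct *work)
        {
                struct ttm_device *bdev =
                        container_of(work, struct ttm_device, wq.work);

                /* Retry in ~10ms while some destroys are still blocked. */
                if (!ttm_bo_delayed_delete(bdev, false))
                        schedule_delayed_work(&bdev->wq,
                                              ((HZ / 100) < 1) ? 1 : HZ / 100);
        }
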
406 struct ttm_device *bdev = bo->bdev; in ttm_bo_release() local
421 if (bo->bdev->funcs->release_notify) in ttm_bo_release()
422 bo->bdev->funcs->release_notify(bo); in ttm_bo_release()
424 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node); in ttm_bo_release()
425 ttm_mem_io_free(bdev, bo->resource); in ttm_bo_release()
434 spin_lock(&bo->bdev->lru_lock); in ttm_bo_release()
450 list_add_tail(&bo->ddestroy, &bdev->ddestroy); in ttm_bo_release()
451 spin_unlock(&bo->bdev->lru_lock); in ttm_bo_release()
453 schedule_delayed_work(&bdev->wq, in ttm_bo_release()
458 spin_lock(&bo->bdev->lru_lock); in ttm_bo_release()
461 spin_unlock(&bo->bdev->lru_lock); in ttm_bo_release()
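
release_notify (lines 421-422) fires when the last reference drops but before the resource goes away, so placement-dependent cleanup is still possible (amdgpu wipes VRAM contents from this hook). Hypothetical sketch:

        static void mydrv_bo_release_notify(struct ttm_buffer_object *bo)
        {
                if (bo->base.import_attach)
                        return;         /* imported buffer: nothing to scrub */

                /* ... last-chance work that still needs bo->resource ... */
        }
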
477 int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev) in ttm_bo_lock_delayed_workqueue() argument
479 return cancel_delayed_work_sync(&bdev->wq); in ttm_bo_lock_delayed_workqueue()
483 void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched) in ttm_bo_unlock_delayed_workqueue() argument
486 schedule_delayed_work(&bdev->wq, in ttm_bo_unlock_delayed_workqueue()
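
The lock/unlock pair parks the delayed-destroy work while the driver does something incompatible with concurrent teardown; the returned int restores the work only if it was actually pending. Modeled on radeon's GPU-reset path, with hypothetical mydrv_* glue:

        struct mydrv_device {                   /* hypothetical wrapper */
                struct ttm_device bdev;
        };

        static int mydrv_hw_reset(struct mydrv_device *mdev)
        {
                return 0;                       /* stand-in for real reset */
        }

        static int mydrv_gpu_reset(struct mydrv_device *mdev)
        {
                int resched = ttm_bo_lock_delayed_workqueue(&mdev->bdev);
                int ret;

                ret = mydrv_hw_reset(mdev);

                ttm_bo_unlock_delayed_workqueue(&mdev->bdev, resched);
                return ret;
        }
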
519 struct ttm_device *bdev = bo->bdev; in ttm_bo_evict() local
531 bdev->funcs->evict_flags(bo, &placement); in ttm_bo_evict()
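
evict_flags (line 531) asks the driver where the victim may land; the usual fallback is anywhere in system memory. Sketch with illustrative placement tables (reused by later sketches):

        static const struct ttm_place mydrv_sys_place = {
                .mem_type = TTM_PL_SYSTEM,      /* fpfn/lpfn 0: no range limit */
        };

        static struct ttm_placement mydrv_sys_placement = {
                .num_placement = 1,
                .placement = &mydrv_sys_place,
                .num_busy_placement = 1,
                .busy_placement = &mydrv_sys_place,
        };

        static void mydrv_evict_flags(struct ttm_buffer_object *bo,
                                      struct ttm_placement *placement)
        {
                *placement = mydrv_sys_placement;
        }
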
620 if (ret && place && !bo->bdev->funcs->eviction_valuable(bo, place)) { in ttm_bo_evict_swapout_allowable()
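
eviction_valuable (line 620) is the driver's veto over a candidate picked by the LRU walk; the exported default ttm_bo_eviction_valuable() exists precisely for this delegation pattern:

        static bool mydrv_eviction_valuable(struct ttm_buffer_object *bo,
                                            const struct ttm_place *place)
        {
                /* e.g. refuse BOs the driver has marked busy, then defer
                 * to the stock placement-overlap check. */
                return ttm_bo_eviction_valuable(bo, place);
        }
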
666 int ttm_mem_evict_first(struct ttm_device *bdev, in ttm_mem_evict_first() argument
677 spin_lock(&bdev->lru_lock); in ttm_mem_evict_first()
708 spin_unlock(&bdev->lru_lock); in ttm_mem_evict_first()
722 spin_unlock(&bdev->lru_lock); in ttm_mem_evict_first()
780 struct ttm_device *bdev = bo->bdev; in ttm_bo_mem_force_space() local
785 man = ttm_manager_type(bdev, place->mem_type); in ttm_bo_mem_force_space()
793 ret = ttm_mem_evict_first(bdev, man, place, ctx, in ttm_bo_mem_force_space()
815 struct ttm_device *bdev = bo->bdev; in ttm_bo_mem_space() local
827 man = ttm_manager_type(bdev, place->mem_type); in ttm_bo_mem_space()
853 man = ttm_manager_type(bdev, place->mem_type); in ttm_bo_mem_space()
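
Drivers rarely call ttm_bo_mem_space() directly; the usual entry point is ttm_bo_validate(), which walks the placement list (evicting via ttm_bo_mem_force_space() when needed) and then performs the move. Sketch, reusing mydrv_sys_placement from above:

        #include <linux/dma-resv.h>

        static int mydrv_move_to_system(struct ttm_buffer_object *bo)
        {
                struct ttm_operation_ctx ctx = {
                        .interruptible = true,
                        .no_wait_gpu = false,
                };
                int ret;

                ret = dma_resv_lock(bo->base.resv, NULL);
                if (ret)
                        return ret;
                ret = ttm_bo_validate(bo, &mydrv_sys_placement, &ctx);
                dma_resv_unlock(bo->base.resv);
                return ret;
        }
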
997 int ttm_bo_init_reserved(struct ttm_device *bdev, in ttm_bo_init_reserved() argument
1017 bo->bdev = bdev; in ttm_bo_init_reserved()
1043 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node, in ttm_bo_init_reserved()
1071 int ttm_bo_init(struct ttm_device *bdev, in ttm_bo_init() argument
1085 ret = ttm_bo_init_reserved(bdev, bo, size, type, placement, in ttm_bo_init()
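
Object creation, sketched against the signature this listing reflects (bdev, bo, size, type, placement, page_alignment, interruptible, sg, resv, destroy). The drm_device pointer and mydrv_* glue are hypothetical; note that on failure ttm_bo_init() has already invoked the destroy callback, so the wrapper must not be freed twice:

        #include <linux/slab.h>
        #include <drm/drm_gem.h>

        static void mydrv_bo_destroy(struct ttm_buffer_object *bo)
        {
                kfree(container_of(bo, struct mydrv_bo, tbo));
        }

        static int mydrv_bo_create(struct drm_device *ddev,
                                   struct ttm_device *bdev,
                                   size_t size, struct mydrv_bo **out)
        {
                struct mydrv_bo *mbo;
                int ret;

                mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
                if (!mbo)
                        return -ENOMEM;

                /* TTM BOs embed a GEM object; initialize the base first
                 * (line 1043 adds its mmap offset to bdev->vma_manager). */
                drm_gem_private_object_init(ddev, &mbo->tbo.base, size);

                ret = ttm_bo_init(bdev, &mbo->tbo, size, ttm_bo_type_device,
                                  &mydrv_sys_placement, 0 /* page alignment */,
                                  true /* interruptible */, NULL, NULL,
                                  mydrv_bo_destroy);
                if (ret)
                        return ret;     /* mbo already freed via destroy */

                *out = mbo;
                return 0;
        }
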
1103 struct ttm_device *bdev = bo->bdev; in ttm_bo_unmap_virtual() local
1105 drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); in ttm_bo_unmap_virtual()
1106 ttm_mem_io_free(bdev, bo->resource); in ttm_bo_unmap_virtual()
1170 spin_unlock(&bo->bdev->lru_lock); in ttm_bo_swapout()
1205 if (bo->bdev->funcs->swap_notify) in ttm_bo_swapout()
1206 bo->bdev->funcs->swap_notify(bo); in ttm_bo_swapout()
1209 ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags); in ttm_bo_swapout()
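
swap_notify (lines 1205-1206) runs just before the pages are written out at line 1209, so CPU-side mappings must not survive it. Sketch assuming the mydrv_bo wrapper above caches a kernel map in a ttm_bo_kmap_obj (hypothetical bookkeeping; ttm_bo_kunmap() is the real helper):

        static void mydrv_swap_notify(struct ttm_buffer_object *bo)
        {
                struct mydrv_bo *mbo = container_of(bo, struct mydrv_bo, tbo);

                /* Drop any cached kernel mapping before the pages leave RAM. */
                if (mbo->kmap.virtual)
                        ttm_bo_kunmap(&mbo->kmap);
        }
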
1227 ttm_tt_destroy(bo->bdev, bo->ttm); in ttm_bo_tt_destroy()