Lines matching refs: nvbo
All hits fall in drivers/gpu/drm/nouveau/nouveau_gem.c. Each entry gives the source line number, the matched code, the containing function, and whether nvbo is bound there as a local or an argument.
42 struct nouveau_bo *nvbo = nouveau_gem_object(gem); in nouveau_gem_object_del() local
43 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_gem_object_del()
44 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_gem_object_del()
53 drm_prime_gem_destroy(gem, nvbo->bo.sg); in nouveau_gem_object_del()
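
The four hits above are the GEM destructor. A minimal sketch of how they fit together; the import_attach test, the runtime-PM bracket (the likely reason drm is fetched on line 43), and the final ttm_bo_unref() sit on unmatched lines and are reconstructions, not search output:

    static void
    nouveau_gem_object_del(struct drm_gem_object *gem)
    {
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);       /* line 42 */
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);   /* line 43 */
        struct ttm_buffer_object *bo = &nvbo->bo;                /* line 44 */
        struct device *dev = drm->dev->dev;
        int ret;

        /* Wake the GPU so teardown can touch the hardware. */
        ret = pm_runtime_get_sync(dev);
        if (WARN_ON(ret < 0 && ret != -EACCES))
            return;

        /* Imported dma-bufs carry an sg table that must be released. */
        if (gem->import_attach)
            drm_prime_gem_destroy(gem, nvbo->bo.sg);             /* line 53 */

        drm_gem_object_release(gem);

        /* Drop the last TTM reference; this frees the nouveau_bo. */
        ttm_bo_unref(&bo);

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
    }
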
69 struct nouveau_bo *nvbo = nouveau_gem_object(gem); in nouveau_gem_object_open() local
70 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_gem_object_open()
78 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); in nouveau_gem_object_open()
86 ret = nouveau_vma_new(nvbo, &cli->vmm, &vma); in nouveau_gem_object_open()
90 ttm_bo_unreserve(&nvbo->bo); in nouveau_gem_object_open()
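
Lines 69-90 are the per-open hook: each client that opens the handle gets its own mapping in its VM. A sketch assuming the old-chipset early-out and the runtime-PM handling (what drm on line 70 is for) are elided:

    int
    nouveau_gem_object_open(struct drm_gem_object *gem,
                            struct drm_file *file_priv)
    {
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);       /* line 69 */
        struct nouveau_vma *vma;
        int ret;

        /* The VMA list hangs off the BO; reserve it before touching. */
        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);     /* line 78 */
        if (ret)
            return ret;

        /* Create (or take a reference on) this client's mapping. */
        ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);            /* line 86 */

        ttm_bo_unreserve(&nvbo->bo);                             /* line 90 */
        return ret;
    }
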
116 nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma) in nouveau_gem_object_unmap() argument
143 struct nouveau_bo *nvbo = nouveau_gem_object(gem); in nouveau_gem_object_close() local
144 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_gem_object_close()
152 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); in nouveau_gem_object_close()
156 vma = nouveau_vma_find(nvbo, &cli->vmm); in nouveau_gem_object_close()
161 nouveau_gem_object_unmap(nvbo, vma); in nouveau_gem_object_close()
167 ttm_bo_unreserve(&nvbo->bo); in nouveau_gem_object_close()
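
Lines 143-167 are the mirror image on close, with line 116 naming the helper it calls. A condensed sketch; the refcount drop on the VMA and the omitted runtime-PM bracket are assumptions about the unmatched lines:

    void
    nouveau_gem_object_close(struct drm_gem_object *gem,
                             struct drm_file *file_priv)
    {
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);       /* line 143 */
        struct nouveau_vma *vma;

        if (ttm_bo_reserve(&nvbo->bo, false, false, NULL))       /* line 152 */
            return;

        vma = nouveau_vma_find(nvbo, &cli->vmm);                 /* line 156 */
        if (vma && --vma->refs == 0) {
            /* Last user: nouveau_gem_object_unmap() (line 116) tears the
             * mapping down, deferring it while fences are outstanding. */
            nouveau_gem_object_unmap(nvbo, vma);                 /* line 161 */
        }
        ttm_bo_unreserve(&nvbo->bo);                             /* line 167 */
    }
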
176 struct nouveau_bo *nvbo; in nouveau_gem_new() local
194 nvbo = *pnvbo; in nouveau_gem_new()
200 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | in nouveau_gem_new()
203 nvbo->valid_domains &= domain; in nouveau_gem_new()
207 ret = drm_gem_object_init(drm->dev, &nvbo->gem, nvbo->bo.mem.size); in nouveau_gem_new()
213 nvbo->bo.persistent_swap_storage = nvbo->gem.filp; in nouveau_gem_new()
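
Lines 176-213 are object creation. A sketch of the tail of nouveau_gem_new() after nouveau_bo_new() succeeds; the GART half of the initializer sits on the unmatched line 201, and the Tesla-family check guarding line 203 is recalled from kernels of this vintage, not shown by the search:

    nvbo = *pnvbo;                                               /* line 194 */

    /* nv50+ can restrict later migration to the domains requested at
     * creation time; older chips keep the full set for ABI reasons. */
    nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |              /* line 200 */
                          NOUVEAU_GEM_DOMAIN_GART;
    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
        nvbo->valid_domains &= domain;                           /* line 203 */

    /* Initialise the GEM object embedded in the nouveau_bo. */
    ret = drm_gem_object_init(drm->dev, &nvbo->gem,
                              nvbo->bo.mem.size);                /* line 207 */
    if (ret) {
        nouveau_bo_ref(NULL, pnvbo);
        return ret;
    }

    /* Let TTM swap the backing pages out through the GEM shmem file. */
    nvbo->bo.persistent_swap_storage = nvbo->gem.filp;           /* line 213 */
    return 0;
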
222 struct nouveau_bo *nvbo = nouveau_gem_object(gem); in nouveau_gem_info() local
225 if (is_power_of_2(nvbo->valid_domains)) in nouveau_gem_info()
226 rep->domain = nvbo->valid_domains; in nouveau_gem_info()
227 else if (nvbo->bo.mem.mem_type == TTM_PL_TT) in nouveau_gem_info()
231 rep->offset = nvbo->bo.offset; in nouveau_gem_info()
233 vma = nouveau_vma_find(nvbo, &cli->vmm); in nouveau_gem_info()
240 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; in nouveau_gem_info()
241 rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node); in nouveau_gem_info()
242 rep->tile_mode = nvbo->mode; in nouveau_gem_info()
243 rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG; in nouveau_gem_info()
245 rep->tile_flags |= nvbo->kind << 8; in nouveau_gem_info()
248 rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16; in nouveau_gem_info()
250 rep->tile_flags |= nvbo->zeta; in nouveau_gem_info()
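
Lines 222-250 fill the drm_nouveau_gem_info reply. A sketch; the Fermi/Tesla family conditions around lines 245-250 and the guard that makes the VMA lookup conditional on a per-client VMM are assumptions:

    static int
    nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
                     struct drm_nouveau_gem_info *rep)
    {
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);       /* line 222 */
        struct nouveau_vma *vma;

        /* A power-of-two mask means exactly one permitted domain;
         * otherwise report wherever the BO currently resides. */
        if (is_power_of_2(nvbo->valid_domains))                  /* line 225 */
            rep->domain = nvbo->valid_domains;                   /* line 226 */
        else if (nvbo->bo.mem.mem_type == TTM_PL_TT)             /* line 227 */
            rep->domain = NOUVEAU_GEM_DOMAIN_GART;
        else
            rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
        rep->offset = nvbo->bo.offset;                           /* line 231 */

        /* With per-client VM, the GPU address is the client's VMA. */
        vma = nouveau_vma_find(nvbo, &cli->vmm);                 /* line 233 */
        if (!vma)
            return -EINVAL;
        rep->offset = vma->addr;

        rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;        /* line 240 */
        rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
        rep->tile_mode = nvbo->mode;                             /* line 242 */
        rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
        if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
            rep->tile_flags |= nvbo->kind << 8;                  /* line 245 */
        else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
            rep->tile_flags |= nvbo->kind << 8 |
                               nvbo->comp << 16;                 /* line 248 */
        else
            rep->tile_flags |= nvbo->zeta;                       /* line 250 */
        return 0;
    }
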
260 struct nouveau_bo *nvbo = NULL; in nouveau_gem_ioctl_new() local
265 req->info.tile_flags, &nvbo); in nouveau_gem_ioctl_new()
269 ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle); in nouveau_gem_ioctl_new()
271 ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info); in nouveau_gem_ioctl_new()
277 drm_gem_object_put_unlocked(&nvbo->gem); in nouveau_gem_ioctl_new()
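
Lines 260-277 are the ioctl glue around the above. A sketch of the body; the cleanup on a failed info query is an assumption:

    ret = nouveau_gem_new(cli, req->info.size, req->align,
                          req->info.domain, req->info.tile_mode,
                          req->info.tile_flags, &nvbo);          /* line 265 */
    if (ret)
        return ret;

    ret = drm_gem_handle_create(file_priv, &nvbo->gem,
                                &req->info.handle);              /* line 269 */
    if (ret == 0) {
        ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info); /* line 271 */
        if (ret)
            drm_gem_handle_delete(file_priv, req->info.handle);
    }

    /* The userspace handle now holds the reference; drop ours. */
    drm_gem_object_put_unlocked(&nvbo->gem);                     /* line 277 */
    return ret;
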
285 struct nouveau_bo *nvbo = nouveau_gem_object(gem); in nouveau_gem_set_domain() local
286 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_gem_set_domain()
287 uint32_t domains = valid_domains & nvbo->valid_domains & in nouveau_gem_set_domain()
314 nouveau_bo_placement_set(nvbo, pref_flags, valid_flags); in nouveau_gem_set_domain()
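
Lines 285-314 translate GEM domain masks into TTM placements. A sketch of the whole function; the intersection on line 287 means a write-only submission constrains placement harder than a read:

    static int
    nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
                           uint32_t write_domains, uint32_t valid_domains)
    {
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);       /* line 285 */
        struct ttm_buffer_object *bo = &nvbo->bo;                /* line 286 */
        uint32_t domains = valid_domains & nvbo->valid_domains & /* line 287 */
                (write_domains ? write_domains : read_domains);
        uint32_t pref_flags = 0, valid_flags = 0;

        if (!domains)
            return -EINVAL;

        /* Every allowed domain becomes an acceptable TTM placement... */
        if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
            valid_flags |= TTM_PL_FLAG_VRAM;
        if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
            valid_flags |= TTM_PL_FLAG_TT;

        /* ...but prefer wherever the BO already sits, to avoid moves. */
        if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
            bo->mem.mem_type == TTM_PL_VRAM)
            pref_flags |= TTM_PL_FLAG_VRAM;
        else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
                 bo->mem.mem_type == TTM_PL_TT)
            pref_flags |= TTM_PL_FLAG_TT;
        else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
            pref_flags |= TTM_PL_FLAG_VRAM;
        else
            pref_flags |= TTM_PL_FLAG_TT;

        nouveau_bo_placement_set(nvbo, pref_flags, valid_flags); /* line 314 */
        return 0;
    }
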
328 struct nouveau_bo *nvbo; in validate_fini_no_ticket() local
332 nvbo = list_entry(op->list.next, struct nouveau_bo, entry); in validate_fini_no_ticket()
333 b = &pbbo[nvbo->pbbo_index]; in validate_fini_no_ticket()
336 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in validate_fini_no_ticket()
339 nouveau_bo_fence(nvbo, fence, !!b->write_domains); in validate_fini_no_ticket()
349 if (unlikely(nvbo->validate_mapped)) { in validate_fini_no_ticket()
350 ttm_bo_kunmap(&nvbo->kmap); in validate_fini_no_ticket()
351 nvbo->validate_mapped = false; in validate_fini_no_ticket()
354 list_del(&nvbo->entry); in validate_fini_no_ticket()
355 nvbo->reserved_by = NULL; in validate_fini_no_ticket()
356 ttm_bo_unreserve(&nvbo->bo); in validate_fini_no_ticket()
357 drm_gem_object_put_unlocked(&nvbo->gem); in validate_fini_no_ticket()
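
Lines 328-357 unwind a submission. A sketch of the loop body; the per-VMM fence bookkeeping that makes line 336 fetch drm inside the fence branch is omitted here as an assumption:

    while (!list_empty(&op->list)) {
        nvbo = list_entry(op->list.next,
                          struct nouveau_bo, entry);             /* line 332 */
        b = &pbbo[nvbo->pbbo_index];                             /* line 333 */

        /* Attach the submission fence so later users wait on it. */
        if (likely(fence))
            nouveau_bo_fence(nvbo, fence, !!b->write_domains);   /* line 339 */

        /* Undo any kmap left behind by relocation patching. */
        if (unlikely(nvbo->validate_mapped)) {                   /* line 349 */
            ttm_bo_kunmap(&nvbo->kmap);                          /* line 350 */
            nvbo->validate_mapped = false;                       /* line 351 */
        }

        list_del(&nvbo->entry);                                  /* line 354 */
        nvbo->reserved_by = NULL;                                /* line 355 */
        ttm_bo_unreserve(&nvbo->bo);                             /* line 356 */
        drm_gem_object_put_unlocked(&nvbo->gem);                 /* line 357 */
    }
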
392 struct nouveau_bo *nvbo; in validate_init() local
400 nvbo = nouveau_gem_object(gem); in validate_init()
401 if (nvbo == res_bo) { in validate_init()
407 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { in validate_init()
415 ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket); in validate_init()
422 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true, in validate_init()
425 res_bo = nvbo; in validate_init()
436 struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm); in validate_init()
445 b->user_priv = (uint64_t)(unsigned long)nvbo; in validate_init()
448 nvbo->reserved_by = file_priv; in validate_init()
449 nvbo->pbbo_index = i; in validate_init()
452 list_add_tail(&nvbo->entry, &both_list); in validate_init()
455 list_add_tail(&nvbo->entry, &vram_list); in validate_init()
458 list_add_tail(&nvbo->entry, &gart_list); in validate_init()
462 list_add_tail(&nvbo->entry, &both_list); in validate_init()
466 if (nvbo == res_bo) in validate_init()
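
Lines 392-466 are the reservation loop with ww-mutex deadlock backoff. A sketch of one iteration; error logging, GEM reference drops, and the list splicing before the back-out are unmatched lines left as assumptions:

    nvbo = nouveau_gem_object(gem);                              /* line 400 */
    if (nvbo == res_bo) {                                        /* line 401 */
        /* Already reserved via the slowpath before the retry; skip. */
        res_bo = NULL;
        continue;
    }

    /* The same BO twice in one submission is a userspace bug. */
    if (nvbo->reserved_by && nvbo->reserved_by == file_priv)     /* line 407 */
        return -EINVAL;

    ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);   /* line 415 */
    if (ret) {
        /* Back out everything reserved so far (list splicing omitted). */
        validate_fini_no_ticket(op, NULL, NULL);
        if (ret == -EDEADLK) {
            /* Wound/wait backoff: sleep on the contended BO, then
             * restart the loop with it already reserved. */
            ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
                                          &op->ticket);          /* line 422 */
            if (!ret)
                res_bo = nvbo;                                   /* line 425 */
        }
        if (ret)
            return ret;
    }

    b->user_priv = (uint64_t)(unsigned long)nvbo;                /* line 445 */
    nvbo->reserved_by = file_priv;                               /* line 448 */
    nvbo->pbbo_index = i;                                        /* line 449 */

    /* Bucket by the domains the BO may be validated into. */
    if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
        (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
        list_add_tail(&nvbo->entry, &both_list);                 /* line 452 */
    else if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
        list_add_tail(&nvbo->entry, &vram_list);                 /* line 455 */
    else if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
        list_add_tail(&nvbo->entry, &gart_list);                 /* line 458 */
    else
        list_add_tail(&nvbo->entry, &both_list);                 /* line 462, with -EINVAL */

    if (nvbo == res_bo)                                          /* line 466 */
        goto retry;
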
488 struct nouveau_bo *nvbo; in validate_list() local
491 list_for_each_entry(nvbo, list, entry) { in validate_list()
492 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; in validate_list()
494 ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains, in validate_list()
502 ret = nouveau_bo_validate(nvbo, true, false); in validate_list()
509 ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true); in validate_list()
517 if (nvbo->bo.offset == b->presumed.offset && in validate_list()
518 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && in validate_list()
520 (nvbo->bo.mem.mem_type == TTM_PL_TT && in validate_list()
524 if (nvbo->bo.mem.mem_type == TTM_PL_TT) in validate_list()
528 b->presumed.offset = nvbo->bo.offset; in validate_list()
532 if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed, in validate_list()
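
Lines 488-532 run each reserved BO through the validation pipeline: place, validate, fence-sync, then fix up userspace's presumed offsets. A sketch; the pre-Tesla guard around the presumed-offset block and the relocs counter are assumptions:

    list_for_each_entry(nvbo, list, entry) {                     /* line 491 */
        struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

        /* Restrict placement to what this submission asked for. */
        ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains, /* line 494 */
                                     b->write_domains,
                                     b->valid_domains);
        if (unlikely(ret))
            return ret;

        /* Move/populate the BO into a valid placement. */
        ret = nouveau_bo_validate(nvbo, true, false);            /* line 502 */
        if (unlikely(ret))
            return ret;

        /* Order this channel behind the BO's other users. */
        ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
        if (unlikely(ret))
            return ret;

        /* Userspace embedded a presumed GPU offset in the pushbuf;
         * if the BO still matches it, no relocation is needed. */
        if (nvbo->bo.offset == b->presumed.offset &&             /* line 517 */
            ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
              b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
             (nvbo->bo.mem.mem_type == TTM_PL_TT &&
              b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
            continue;

        if (nvbo->bo.mem.mem_type == TTM_PL_TT)                  /* line 524 */
            b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
        else
            b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
        b->presumed.offset = nvbo->bo.offset;                    /* line 528 */
        b->presumed.valid = 0;
        relocs++;

        /* Tell userspace its guess was stale. */
        if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,      /* line 532 */
                         &b->presumed, sizeof(b->presumed)))
            return -EFAULT;
    }
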
616 struct nouveau_bo *nvbo; in nouveau_gem_pushbuf_reloc_apply() local
634 nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv; in nouveau_gem_pushbuf_reloc_apply()
637 nvbo->bo.mem.num_pages << PAGE_SHIFT)) { in nouveau_gem_pushbuf_reloc_apply()
643 if (!nvbo->kmap.virtual) { in nouveau_gem_pushbuf_reloc_apply()
644 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, in nouveau_gem_pushbuf_reloc_apply()
645 &nvbo->kmap); in nouveau_gem_pushbuf_reloc_apply()
650 nvbo->validate_mapped = true; in nouveau_gem_pushbuf_reloc_apply()
668 ret = ttm_bo_wait(&nvbo->bo, false, false); in nouveau_gem_pushbuf_reloc_apply()
674 nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data); in nouveau_gem_pushbuf_reloc_apply()
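
Lines 616-674 apply one relocation. A sketch of a single loop iteration; the computation of data from r->flags and the presumed offsets happens on unmatched lines and is only noted here:

    nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv; /* line 634 */

    /* Bounds-check the 32-bit write against the BO's size. */
    if (unlikely(r->reloc_bo_offset + 4 >
                 nvbo->bo.mem.num_pages << PAGE_SHIFT)) {        /* line 637 */
        ret = -EINVAL;
        break;
    }

    /* Map the whole BO once; validate_fini_no_ticket() unmaps it. */
    if (!nvbo->kmap.virtual) {                                   /* line 643 */
        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
                          &nvbo->kmap);                          /* lines 644-645 */
        if (ret)
            break;
        nvbo->validate_mapped = true;                            /* line 650 */
    }

    /* 'data' is derived from r->flags and the target BO's presumed
     * offset (unmatched lines, omitted here). CPU stores into the
     * pushbuf require idle hardware first. */
    ret = ttm_bo_wait(&nvbo->bo, false, false);                  /* line 668 */
    if (ret)
        break;

    nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);        /* line 674 */
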
793 struct nouveau_bo *nvbo = (void *)(unsigned long) in nouveau_gem_ioctl_pushbuf() local
796 OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2); in nouveau_gem_ioctl_pushbuf()
807 struct nouveau_bo *nvbo = (void *)(unsigned long) in nouveau_gem_ioctl_pushbuf() local
814 if (!nvbo->kmap.virtual) { in nouveau_gem_ioctl_pushbuf()
815 ret = ttm_bo_kmap(&nvbo->bo, 0, in nouveau_gem_ioctl_pushbuf()
816 nvbo->bo.mem. in nouveau_gem_ioctl_pushbuf()
818 &nvbo->kmap); in nouveau_gem_ioctl_pushbuf()
823 nvbo->validate_mapped = true; in nouveau_gem_ioctl_pushbuf()
826 nouveau_bo_wr32(nvbo, (push[i].offset + in nouveau_gem_ioctl_pushbuf()
831 (nvbo->bo.offset + push[i].offset)); in nouveau_gem_ioctl_pushbuf()
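
Lines 793-831 are two submission paths in the pushbuf ioctl. A rough sketch under stated assumptions: the chipset split, the cmd value (a jump back into the channel's ring, computed on unmatched lines), and the error handling are all reconstructions:

    /* Assumed jump-call path (newer pre-IB chipsets). */
    for (i = 0; i < req->nr_push; i++) {
        struct nouveau_bo *nvbo = (void *)(unsigned long)
            bo[push[i].bo_index].user_priv;                      /* lines 793-795 */

        /* Bit 1 marks a call into the BO at its GPU offset. */
        OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);  /* line 796 */
    }

    /* Assumed old-chipset path: patch a return jump into the pushbuf
     * body itself, which needs a CPU mapping of the BO. */
    for (i = 0; i < req->nr_push; i++) {
        struct nouveau_bo *nvbo = (void *)(unsigned long)
            bo[push[i].bo_index].user_priv;                      /* line 807 */

        if (!nvbo->kmap.virtual) {                               /* line 814 */
            ret = ttm_bo_kmap(&nvbo->bo, 0,
                              nvbo->bo.mem.num_pages,            /* lines 815-818 */
                              &nvbo->kmap);
            if (ret)
                goto out;  /* hypothetical error label */
            nvbo->validate_mapped = true;                        /* line 823 */
        }

        /* Overwrite the last words of the pushed range with 'cmd'. */
        nouveau_bo_wr32(nvbo, (push[i].offset +                  /* line 826 */
                        push[i].length - 8) / 4, cmd);

        OUT_RING(chan, 0x20000000 |
                       (nvbo->bo.offset + push[i].offset));      /* line 831 */
        OUT_RING(chan, 0);
    }
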
876 struct nouveau_bo *nvbo; in nouveau_gem_ioctl_cpu_prep() local
885 nvbo = nouveau_gem_object(gem); in nouveau_gem_ioctl_cpu_prep()
887 lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, in nouveau_gem_ioctl_cpu_prep()
896 nouveau_bo_sync_for_cpu(nvbo); in nouveau_gem_ioctl_cpu_prep()
908 struct nouveau_bo *nvbo; in nouveau_gem_ioctl_cpu_fini() local
913 nvbo = nouveau_gem_object(gem); in nouveau_gem_ioctl_cpu_fini()
915 nouveau_bo_sync_for_device(nvbo); in nouveau_gem_ioctl_cpu_fini()
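
Lines 876-915 bracket CPU access to a BO. A sketch of both ioctl bodies; the timeout value and the exact error mapping are assumptions:

    /* CPU_PREP: wait on the reservation object (all fences, or just
     * writers when 'write' is false), then flush for CPU access. */
    nvbo = nouveau_gem_object(gem);                              /* line 885 */
    lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write,
                                               true, timeout);   /* line 887 */
    if (lret == 0)
        ret = -EBUSY;               /* timed out, GPU still busy */
    else
        ret = lret < 0 ? lret : 0;

    nouveau_bo_sync_for_cpu(nvbo);                               /* line 896 */

    /* CPU_FINI: hand ownership of the pages back to the device. */
    nvbo = nouveau_gem_object(gem);                              /* line 913 */
    nouveau_bo_sync_for_device(nvbo);                            /* line 915 */
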