Lines Matching +full:dont +full:- +full:validate (all hits from drivers/gpu/drm/radeon/radeon_gem.c; each line shows its source line number and, where applicable, the enclosing function)

29 #include <linux/iosys-map.h>
50 struct ttm_buffer_object *bo = vmf->vma->vm_private_data; in radeon_gem_fault()
51 struct radeon_device *rdev = radeon_get_rdev(bo->bdev); in radeon_gem_fault()
54 down_read(&rdev->pm.mclk_lock); in radeon_gem_fault()
64 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, in radeon_gem_fault()
66 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) in radeon_gem_fault()
70 dma_resv_unlock(bo->base.resv); in radeon_gem_fault()
73 up_read(&rdev->pm.mclk_lock); in radeon_gem_fault()
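The radeon_gem_fault() hits above trace TTM's fault-handling contract: take rdev->pm.mclk_lock for read so a concurrent memory-clock change cannot migrate the BO mid-fault, reserve the BO, fault the pages in, then unwind. The one subtlety is VM_FAULT_RETRY: unless FAULT_FLAG_RETRY_NOWAIT was set, ttm_bo_vm_fault_reserved() has already dropped both the mmap lock and the reservation on that path, so the handler must skip dma_resv_unlock(). A minimal sketch of the skeleton, with the radeon-specific pieces elided:

        /* sketch only -- radeon additionally holds pm.mclk_lock around all of this */
        static vm_fault_t gem_fault_sketch(struct vm_fault *vmf)
        {
                struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
                vm_fault_t ret;

                ret = ttm_bo_vm_reserve(bo, vmf);
                if (ret)
                        return ret;

                ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
                                               TTM_BO_VM_NUM_PREFAULT);
                if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                        return ret;     /* helper already unlocked for us */

                dma_resv_unlock(bo->base.resv);
                return ret;
        }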
112 max_size = rdev->mc.gtt_size - rdev->gart_pin_size; in radeon_gem_object_create()
116 return -ENOMEM; in radeon_gem_object_create()
123 if (r != -ERESTARTSYS) { in radeon_gem_object_create()
133 *obj = &robj->tbo.base; in radeon_gem_object_create()
134 (*obj)->funcs = &radeon_gem_object_funcs; in radeon_gem_object_create()
135 robj->pid = task_pid_nr(current); in radeon_gem_object_create()
137 mutex_lock(&rdev->gem.mutex); in radeon_gem_object_create()
138 list_add_tail(&robj->list, &rdev->gem.objects); in radeon_gem_object_create()
139 mutex_unlock(&rdev->gem.mutex); in radeon_gem_object_create()
153 /* work out where to validate the buffer to */ in radeon_gem_set_domain()
165 r = dma_resv_wait_timeout(robj->tbo.base.resv, in radeon_gem_set_domain()
169 r = -EBUSY; in radeon_gem_set_domain()
171 if (r < 0 && r != -EINTR) { in radeon_gem_set_domain()
176 if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) { in radeon_gem_set_domain()
177 /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */ in radeon_gem_set_domain()
178 return -EINVAL; in radeon_gem_set_domain()
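The wait in radeon_gem_set_domain() leans on dma_resv_wait_timeout()'s three-way return convention: a positive value is the remaining timeout in jiffies, zero means the timeout elapsed, and a negative value is an error, of which -EINTR merely means the ioctl should be restarted. A hedged sketch of the idiom; the usage argument and the 30-second budget are assumptions carried over from the wait_idle ioctl further down:

        long r;

        /* usage/timeout assumed from radeon_gem_wait_idle_ioctl() below */
        r = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
                                  true /* interruptible */, 30 * HZ);
        if (r == 0)
                r = -EBUSY;             /* timed out, fences still pending */
        if (r < 0 && r != -EINTR)
                pr_err("resv wait failed: %ld\n", r);   /* real errors only */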
185 INIT_LIST_HEAD(&rdev->gem.objects); in radeon_gem_init()
201 struct radeon_device *rdev = rbo->rdev; in radeon_gem_object_open()
202 struct radeon_fpriv *fpriv = file_priv->driver_priv; in radeon_gem_object_open()
203 struct radeon_vm *vm = &fpriv->vm; in radeon_gem_object_open()
207 if ((rdev->family < CHIP_CAYMAN) || in radeon_gem_object_open()
208 (!rdev->accel_working)) { in radeon_gem_object_open()
221 ++bo_va->ref_count; in radeon_gem_object_open()
232 struct radeon_device *rdev = rbo->rdev; in radeon_gem_object_close()
233 struct radeon_fpriv *fpriv = file_priv->driver_priv; in radeon_gem_object_close()
234 struct radeon_vm *vm = &fpriv->vm; in radeon_gem_object_close()
238 if ((rdev->family < CHIP_CAYMAN) || in radeon_gem_object_close()
239 (!rdev->accel_working)) { in radeon_gem_object_close()
245 dev_err(rdev->dev, "leaking bo va because " in radeon_gem_object_close()
251 if (--bo_va->ref_count == 0) { in radeon_gem_object_close()
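Taken together, radeon_gem_object_open() and radeon_gem_object_close() keep exactly one bo_va per (BO, VM) pair and refcount it, since one file can open the same BO through several handles. A reconstruction of the pattern from the fragments above; radeon_vm_bo_add() and radeon_vm_bo_rmv() are the helpers I would expect here and are an assumption:

        /* open: find-or-create the mapping object, then take a reference */
        bo_va = radeon_vm_bo_find(vm, rbo);
        if (!bo_va)
                bo_va = radeon_vm_bo_add(rdev, vm, rbo);        /* assumed helper */
        else
                ++bo_va->ref_count;

        /* close: drop the reference, tear the mapping down on the last one */
        bo_va = radeon_vm_bo_find(vm, rbo);
        if (bo_va && --bo_va->ref_count == 0)
                radeon_vm_bo_rmv(rdev, bo_va);                  /* assumed helper */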
260 if (r == -EDEADLK) { in radeon_gem_handle_lockup()
263 r = -EAGAIN; in radeon_gem_handle_lockup()
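radeon_gem_handle_lockup() is small enough to reconstruct almost in full from its two hits: -EDEADLK coming back from a fence wait means the GPU is hung, so the driver resets it and translates the error into -EAGAIN to make userspace retry the ioctl. A sketch, assuming radeon_gpu_reset() is the reset entry point:

        static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
        {
                if (r == -EDEADLK) {
                        r = radeon_gpu_reset(rdev);     /* assumed entry point */
                        if (!r)
                                r = -EAGAIN;            /* reset worked, retry */
                }
                return r;
        }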
271 struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev); in radeon_gem_object_mmap()
273 if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm)) in radeon_gem_object_mmap()
274 return -EPERM; in radeon_gem_object_mmap()
299 struct radeon_device *rdev = dev->dev_private; in radeon_gem_info_ioctl()
303 man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM); in radeon_gem_info_ioctl()
305 args->vram_size = (u64)man->size << PAGE_SHIFT; in radeon_gem_info_ioctl()
306 args->vram_visible = rdev->mc.visible_vram_size; in radeon_gem_info_ioctl()
307 args->vram_visible -= rdev->vram_pin_size; in radeon_gem_info_ioctl()
308 args->gart_size = rdev->mc.gtt_size; in radeon_gem_info_ioctl()
309 args->gart_size -= rdev->gart_pin_size; in radeon_gem_info_ioctl()
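The figures radeon_gem_info_ioctl() reports are "what userspace can still use", not raw capacities: the TTM VRAM manager counts pages (hence the << PAGE_SHIFT), and pinned memory is subtracted from the visible-VRAM and GART numbers because it can never be evicted to make room. Condensed from the hits:

        man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

        args->vram_size    = (u64)man->size << PAGE_SHIFT;      /* pages -> bytes */
        args->vram_visible = rdev->mc.visible_vram_size - rdev->vram_pin_size;
        args->gart_size    = rdev->mc.gtt_size - rdev->gart_pin_size;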
319 return -EOPNOTSUPP; in radeon_gem_pread_ioctl()
327 return -EOPNOTSUPP; in radeon_gem_pwrite_ioctl()
333 struct radeon_device *rdev = dev->dev_private; in radeon_gem_create_ioctl()
339 down_read(&rdev->exclusive_lock); in radeon_gem_create_ioctl()
341 args->size = roundup(args->size, PAGE_SIZE); in radeon_gem_create_ioctl()
342 r = radeon_gem_object_create(rdev, args->size, args->alignment, in radeon_gem_create_ioctl()
343 args->initial_domain, args->flags, in radeon_gem_create_ioctl()
346 up_read(&rdev->exclusive_lock); in radeon_gem_create_ioctl()
351 /* drop reference from allocate - handle holds it now */ in radeon_gem_create_ioctl()
354 up_read(&rdev->exclusive_lock); in radeon_gem_create_ioctl()
358 args->handle = handle; in radeon_gem_create_ioctl()
359 up_read(&rdev->exclusive_lock); in radeon_gem_create_ioctl()
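The "drop reference from allocate" comment that recurs here, in the userptr ioctl and in dumb_create is the standard GEM ownership handoff: object creation returns one reference, drm_gem_handle_create() takes its own, and the creation reference is dropped unconditionally, so on success the handle is the sole owner and on failure the object is freed. A generic sketch of the tail of such an ioctl (the exclusive_lock handling from the hits is elided):

        u32 handle;

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(gobj);
        if (r)
                return r;       /* on failure the put above freed the object */

        args->handle = handle;
        return 0;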
367 struct radeon_device *rdev = dev->dev_private; in radeon_gem_userptr_ioctl()
374 args->addr = untagged_addr(args->addr); in radeon_gem_userptr_ioctl()
376 if (offset_in_page(args->addr | args->size)) in radeon_gem_userptr_ioctl()
377 return -EINVAL; in radeon_gem_userptr_ioctl()
380 if (args->flags & ~(RADEON_GEM_USERPTR_READONLY | in radeon_gem_userptr_ioctl()
383 return -EINVAL; in radeon_gem_userptr_ioctl()
385 if (args->flags & RADEON_GEM_USERPTR_READONLY) { in radeon_gem_userptr_ioctl()
387 if (rdev->family < CHIP_R600) in radeon_gem_userptr_ioctl()
388 return -EINVAL; in radeon_gem_userptr_ioctl()
390 } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) || in radeon_gem_userptr_ioctl()
391 !(args->flags & RADEON_GEM_USERPTR_REGISTER)) { in radeon_gem_userptr_ioctl()
395 return -EACCES; in radeon_gem_userptr_ioctl()
398 down_read(&rdev->exclusive_lock); in radeon_gem_userptr_ioctl()
401 r = radeon_gem_object_create(rdev, args->size, 0, in radeon_gem_userptr_ioctl()
408 r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags); in radeon_gem_userptr_ioctl()
412 if (args->flags & RADEON_GEM_USERPTR_REGISTER) { in radeon_gem_userptr_ioctl()
413 r = radeon_mn_register(bo, args->addr); in radeon_gem_userptr_ioctl()
418 if (args->flags & RADEON_GEM_USERPTR_VALIDATE) { in radeon_gem_userptr_ioctl()
419 mmap_read_lock(current->mm); in radeon_gem_userptr_ioctl()
422 mmap_read_unlock(current->mm); in radeon_gem_userptr_ioctl()
427 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in radeon_gem_userptr_ioctl()
429 mmap_read_unlock(current->mm); in radeon_gem_userptr_ioctl()
435 /* drop reference from allocate - handle holds it now */ in radeon_gem_userptr_ioctl()
440 args->handle = handle; in radeon_gem_userptr_ioctl()
441 up_read(&rdev->exclusive_lock); in radeon_gem_userptr_ioctl()
448 up_read(&rdev->exclusive_lock); in radeon_gem_userptr_ioctl()
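radeon_gem_userptr_ioctl() is mostly gatekeeping: the address is untagged, address and size must be page-aligned, unknown flags are rejected, pre-R600 parts cannot even handle the read-only case, and writable access to arbitrary VMAs requires CAP_SYS_ADMIN unless the caller opts into anonymous-only memory plus MMU-notifier registration. The VALIDATE step then faults the pages and binds them to GTT while mmap_read_lock keeps the address space from changing underneath. A hedged sketch of that step; radeon_bo_reserve() and radeon_ttm_placement_from_domain() are the helpers I would expect, and the error label is hypothetical:

        if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
                struct ttm_operation_ctx ctx = { true, false };

                mmap_read_lock(current->mm);            /* freeze the address space */
                r = radeon_bo_reserve(bo, true);        /* assumed helper */
                if (!r) {
                        radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                        radeon_bo_unreserve(bo);
                }
                mmap_read_unlock(current->mm);
                if (r)
                        goto release_object;            /* hypothetical label */
        }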
457 /* transition the BO to a domain - in radeon_gem_set_domain_ioctl()
458 * just validate the BO into a certain domain */ in radeon_gem_set_domain_ioctl()
459 struct radeon_device *rdev = dev->dev_private; in radeon_gem_set_domain_ioctl()
464 /* for now if someone requests domain CPU - in radeon_gem_set_domain_ioctl()
466 down_read(&rdev->exclusive_lock); in radeon_gem_set_domain_ioctl()
469 gobj = drm_gem_object_lookup(filp, args->handle); in radeon_gem_set_domain_ioctl()
471 up_read(&rdev->exclusive_lock); in radeon_gem_set_domain_ioctl()
472 return -ENOENT; in radeon_gem_set_domain_ioctl()
475 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); in radeon_gem_set_domain_ioctl()
478 up_read(&rdev->exclusive_lock); in radeon_gem_set_domain_ioctl()
492 return -ENOENT; in radeon_mode_dumb_mmap()
495 if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) { in radeon_mode_dumb_mmap()
497 return -EPERM; in radeon_mode_dumb_mmap()
509 return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr); in radeon_gem_mmap_ioctl()
521 gobj = drm_gem_object_lookup(filp, args->handle); in radeon_gem_busy_ioctl()
523 return -ENOENT; in radeon_gem_busy_ioctl()
527 r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ); in radeon_gem_busy_ioctl()
529 r = -EBUSY; in radeon_gem_busy_ioctl()
533 cur_placement = READ_ONCE(robj->tbo.resource->mem_type); in radeon_gem_busy_ioctl()
534 args->domain = radeon_mem_type_to_domain(cur_placement); in radeon_gem_busy_ioctl()
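radeon_gem_busy_ioctl() is the non-blocking twin of those waits: dma_resv_test_signaled() only polls the fences, and the result is folded into 0 (idle) or -EBUSY, alongside the BO's current domain so userspace can decide whether stalling is worth it. Condensed from the hits:

        r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ);
        if (r)
                r = 0;                  /* every fence has signaled: idle */
        else
                r = -EBUSY;             /* GPU is still using the BO */

        cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
        args->domain = radeon_mem_type_to_domain(cur_placement);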
542 struct radeon_device *rdev = dev->dev_private; in radeon_gem_wait_idle_ioctl()
550 gobj = drm_gem_object_lookup(filp, args->handle); in radeon_gem_wait_idle_ioctl()
552 return -ENOENT; in radeon_gem_wait_idle_ioctl()
556 ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ, in radeon_gem_wait_idle_ioctl()
559 r = -EBUSY; in radeon_gem_wait_idle_ioctl()
564 cur_placement = READ_ONCE(robj->tbo.resource->mem_type); in radeon_gem_wait_idle_ioctl()
565 if (rdev->asic->mmio_hdp_flush && in radeon_gem_wait_idle_ioctl()
567 robj->rdev->asic->mmio_hdp_flush(rdev); in radeon_gem_wait_idle_ioctl()
581 DRM_DEBUG("%d \n", args->handle); in radeon_gem_set_tiling_ioctl()
582 gobj = drm_gem_object_lookup(filp, args->handle); in radeon_gem_set_tiling_ioctl()
584 return -ENOENT; in radeon_gem_set_tiling_ioctl()
586 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); in radeon_gem_set_tiling_ioctl()
600 gobj = drm_gem_object_lookup(filp, args->handle); in radeon_gem_get_tiling_ioctl()
602 return -ENOENT; in radeon_gem_get_tiling_ioctl()
607 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); in radeon_gem_get_tiling_ioctl()
615 * radeon_gem_va_update_vm - update the bo_va in its VM
635 tv.bo = &bo_va->bo->tbo; in radeon_gem_va_update_vm()
639 vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list); in radeon_gem_va_update_vm()
648 domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type); in radeon_gem_va_update_vm()
655 mutex_lock(&bo_va->vm->mutex); in radeon_gem_va_update_vm()
656 r = radeon_vm_clear_freed(rdev, bo_va->vm); in radeon_gem_va_update_vm()
660 if (bo_va->it.start) in radeon_gem_va_update_vm()
661 r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource); in radeon_gem_va_update_vm()
664 mutex_unlock(&bo_va->vm->mutex); in radeon_gem_va_update_vm()
672 if (r && r != -ERESTARTSYS) in radeon_gem_va_update_vm()
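Once the VM's BOs are reserved and validated into a GPU-reachable domain, the page-table work in radeon_gem_va_update_vm() happens under the per-VM mutex, and only for mappings that actually exist (a nonzero interval-tree start). Condensed from the hits, error handling trimmed:

        mutex_lock(&bo_va->vm->mutex);
        r = radeon_vm_clear_freed(rdev, bo_va->vm);     /* flush stale mappings */
        if (!r && bo_va->it.start)                      /* is the BO mapped at a VA? */
                r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);
        mutex_unlock(&bo_va->vm->mutex);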
681 struct radeon_device *rdev = dev->dev_private; in radeon_gem_va_ioctl()
682 struct radeon_fpriv *fpriv = filp->driver_priv; in radeon_gem_va_ioctl()
688 if (!rdev->vm_manager.enabled) { in radeon_gem_va_ioctl()
689 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
690 return -ENOTTY; in radeon_gem_va_ioctl()
693 /* !! DONT REMOVE !! in radeon_gem_va_ioctl()
698 if (args->vm_id) { in radeon_gem_va_ioctl()
699 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
700 return -EINVAL; in radeon_gem_va_ioctl()
703 if (args->offset < RADEON_VA_RESERVED_SIZE) { in radeon_gem_va_ioctl()
704 dev_err(dev->dev, in radeon_gem_va_ioctl()
706 (unsigned long)args->offset, in radeon_gem_va_ioctl()
708 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
709 return -EINVAL; in radeon_gem_va_ioctl()
717 if ((args->flags & invalid_flags)) { in radeon_gem_va_ioctl()
718 dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n", in radeon_gem_va_ioctl()
719 args->flags, invalid_flags); in radeon_gem_va_ioctl()
720 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
721 return -EINVAL; in radeon_gem_va_ioctl()
724 switch (args->operation) { in radeon_gem_va_ioctl()
729 dev_err(dev->dev, "unsupported operation %d\n", in radeon_gem_va_ioctl()
730 args->operation); in radeon_gem_va_ioctl()
731 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
732 return -EINVAL; in radeon_gem_va_ioctl()
735 gobj = drm_gem_object_lookup(filp, args->handle); in radeon_gem_va_ioctl()
737 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
738 return -ENOENT; in radeon_gem_va_ioctl()
743 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
747 bo_va = radeon_vm_bo_find(&fpriv->vm, rbo); in radeon_gem_va_ioctl()
749 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
752 return -ENOENT; in radeon_gem_va_ioctl()
755 switch (args->operation) { in radeon_gem_va_ioctl()
757 if (bo_va->it.start) { in radeon_gem_va_ioctl()
758 args->operation = RADEON_VA_RESULT_VA_EXIST; in radeon_gem_va_ioctl()
759 args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE; in radeon_gem_va_ioctl()
763 r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags); in radeon_gem_va_ioctl()
773 args->operation = RADEON_VA_RESULT_OK; in radeon_gem_va_ioctl()
775 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
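Read from userspace, the checks above form the contract of DRM_IOCTL_RADEON_GEM_VA: vm_id must be 0, the offset has to lie above RADEON_VA_RESERVED_SIZE, RADEON_VM_PAGE_VALID and RADEON_VM_PAGE_SYSTEM are kernel-only flags, and a RADEON_VA_RESULT_VA_EXIST reply hands back the offset of an existing mapping instead of failing. A hypothetical libdrm-style caller; fd, bo_handle and gpu_va are placeholders:

        #include <xf86drm.h>
        #include <drm/radeon_drm.h>

        struct drm_radeon_gem_va va = {
                .handle    = bo_handle,                 /* placeholder */
                .operation = RADEON_VA_MAP,
                .vm_id     = 0,                         /* anything else is -EINVAL */
                .flags     = RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE,
                .offset    = gpu_va,                    /* above RADEON_VA_RESERVED_SIZE */
        };

        r = drmCommandWriteRead(fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r == 0 && va.operation == RADEON_VA_RESULT_VA_EXIST)
                gpu_va = va.offset;                     /* already mapped, reuse old VA */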
790 gobj = drm_gem_object_lookup(filp, args->handle); in radeon_gem_op_ioctl()
792 return -ENOENT; in radeon_gem_op_ioctl()
796 r = -EPERM; in radeon_gem_op_ioctl()
797 if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) in radeon_gem_op_ioctl()
804 switch (args->op) { in radeon_gem_op_ioctl()
806 args->value = robj->initial_domain; in radeon_gem_op_ioctl()
809 robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM | in radeon_gem_op_ioctl()
814 r = -EINVAL; in radeon_gem_op_ioctl()
851 struct radeon_device *rdev = dev->dev_private; in radeon_mode_dumb_create()
856 args->pitch = radeon_align_pitch(rdev, args->width, in radeon_mode_dumb_create()
857 DIV_ROUND_UP(args->bpp, 8), 0); in radeon_mode_dumb_create()
858 args->size = (u64)args->pitch * args->height; in radeon_mode_dumb_create()
859 args->size = ALIGN(args->size, PAGE_SIZE); in radeon_mode_dumb_create()
861 r = radeon_gem_object_create(rdev, args->size, 0, in radeon_mode_dumb_create()
865 return -ENOMEM; in radeon_mode_dumb_create()
868 /* drop reference from allocate - handle holds it now */ in radeon_mode_dumb_create()
873 args->handle = handle; in radeon_mode_dumb_create()
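radeon_mode_dumb_create() derives everything userspace did not specify: bytes per pixel from bpp, a hardware-friendly pitch from the width, and a page-aligned size. As a worked example, at width=1366, height=768, bpp=32 the cpp is 4; on parts that align 32bpp pitches to 64 pixels (an assumption about radeon_align_pitch() internals) the pitch comes out as 1408 * 4 = 5632 bytes and the size as 5632 * 768 = 4325376 bytes, already a multiple of a 4 KiB PAGE_SIZE. The arithmetic from the hits:

        args->pitch = radeon_align_pitch(rdev, args->width,
                                         DIV_ROUND_UP(args->bpp, 8), 0);
        args->size  = (u64)args->pitch * args->height;  /* pitch is in bytes */
        args->size  = ALIGN(args->size, PAGE_SIZE);     /* BOs are page-granular */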
880 struct radeon_device *rdev = m->private; in radeon_debugfs_gem_info_show()
884 mutex_lock(&rdev->gem.mutex); in radeon_debugfs_gem_info_show()
885 list_for_each_entry(rbo, &rdev->gem.objects, list) { in radeon_debugfs_gem_info_show()
889 domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type); in radeon_debugfs_gem_info_show()
904 placement, (unsigned long)rbo->pid); in radeon_debugfs_gem_info_show()
907 mutex_unlock(&rdev->gem.mutex); in radeon_debugfs_gem_info_show()
917 struct dentry *root = rdev->ddev->primary->debugfs_root; in radeon_gem_debugfs_init()