Lines Matching +full:space +full:- +full:constraint

2  * Copyright © 2008-2010 Intel Corporation
25 * Chris Wilson <chris@chris-wilson.co.uk>
43 return !kref_read(&vma->obj->base.refcount); in dying_vma()
52 list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) { in ggtt_flush()
73 if (i915_gem_object_get_rcu(vma->obj)) { in grab_vma()
74 if (!i915_gem_object_trylock(vma->obj, ww)) { in grab_vma()
75 i915_gem_object_put(vma->obj); in grab_vma()
80 atomic_and(~I915_VMA_PIN_MASK, &vma->flags); in grab_vma()
91 i915_gem_object_unlock(vma->obj); in ungrab_vma()
92 i915_gem_object_put(vma->obj); in ungrab_vma()
108 list_add(&vma->evict_link, unwind); in mark_free()
109 return drm_mm_scan_add_block(scan, &vma->node); in mark_free()
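The mark_free() fragment above feeds the drm_mm eviction scanner. As a rough, hedged illustration of that protocol (not the driver's code; the my_* names and the entry layout are placeholders): candidates are added to the scan in LRU order until drm_mm_scan_add_block() reports that a suitable hole can be formed, and every node handed to the scan must afterwards be passed back through drm_mm_scan_remove_block(), in reverse order of addition, with only the nodes for which it returns true actually being evicted.

#include <drm/drm_mm.h>
#include <linux/list.h>

struct my_entry {
	struct drm_mm_node node;	/* allocated inside the drm_mm */
	struct list_head lru_link;	/* position on the caller's LRU */
	struct list_head scan_link;	/* temporary unwind-list linkage */
};

static void my_evict(struct my_entry *e)
{
	/* Eviction boils down to releasing the node's range in the drm_mm;
	 * a real driver would also unbind the backing object here. */
	drm_mm_remove_node(&e->node);
}

static int my_evict_scan(struct drm_mm *mm, struct list_head *lru,
			 u64 size, u64 alignment)
{
	struct drm_mm_scan scan;
	struct my_entry *e, *next;
	LIST_HEAD(unwind);
	bool found = false;

	drm_mm_scan_init_with_range(&scan, mm, size, alignment, 0,
				    0, U64_MAX, DRM_MM_INSERT_BEST);

	list_for_each_entry(e, lru, lru_link) {
		/* list_add() prepends, so a forward walk of @unwind later
		 * visits the nodes in reverse order of addition, as the
		 * drm_mm scan API requires. */
		list_add(&e->scan_link, &unwind);
		if (drm_mm_scan_add_block(&scan, &e->node)) {
			found = true;
			break;
		}
	}

	list_for_each_entry_safe(e, next, &unwind, scan_link) {
		list_del(&e->scan_link);
		if (drm_mm_scan_remove_block(&scan, &e->node))
			my_evict(e);	/* this node is part of the new hole */
		/* otherwise the node keeps its current placement */
	}

	return found ? 0 : -ENOSPC;
}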
124 * i915_gem_evict_something - Evict vmas to make room for binding a new one
125 * @vm: address space to evict from
127 * @min_size: size of the desired free space
128 * @alignment: alignment constraint of the desired free space
129 * @color: color for the desired space
134 * This function will try to evict vmas until a free space satisfying the
140 * Since this function is only used to free up virtual address space it only
142 * pinned. Hence obj->pages_pin_count does not protect against eviction.
144 * To clarify: This is for freeing up virtual address space, not for freeing
164 lockdep_assert_held(&vm->mutex); in i915_gem_evict_something()
168 * The goal is to evict objects and amalgamate space in rough LRU order. in i915_gem_evict_something()
183 drm_mm_scan_init_with_range(&scan, &vm->mm, in i915_gem_evict_something()
190 list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) in i915_gem_evict_something()
193 intel_gt_retire_requests(vm->gt); in i915_gem_evict_something()
199 list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) { in i915_gem_evict_something()
204 active = ERR_PTR(-EAGAIN); in i915_gem_evict_something()
208 * We keep this list in a rough least-recently scanned order in i915_gem_evict_something()
214 * frequently replaced after a frame, which are self-evicting! in i915_gem_evict_something()
216 * fairly static, and keeping it in least-recently scan order in i915_gem_evict_something()
222 if (active != ERR_PTR(-EAGAIN) && defer_evict(vma)) { in i915_gem_evict_something()
226 list_move_tail(&vma->vm_link, &vm->bound_list); in i915_gem_evict_something()
236 ret = drm_mm_scan_remove_block(&scan, &vma->node); in i915_gem_evict_something()
245 * purge when inspecting per-process local address spaces. in i915_gem_evict_something()
248 return -ENOSPC; in i915_gem_evict_something()
259 * a switch to the perma-pinned kernel context. This all also gives in i915_gem_evict_something()
264 return -EBUSY; in i915_gem_evict_something()
277 * scanning, therefore store to-be-evicted objects on a in i915_gem_evict_something()
283 if (drm_mm_scan_remove_block(&scan, &vma->node)) { in i915_gem_evict_something()
286 list_del(&vma->evict_link); in i915_gem_evict_something()
303 /* If we find any non-objects (!vma), we cannot evict them */ in i915_gem_evict_something()
304 if (vma->node.color != I915_COLOR_UNEVICTABLE && in i915_gem_evict_something()
309 ret = -ENOSPC; in i915_gem_evict_something()
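For context, a hedged sketch of how the binding path is expected to use i915_gem_evict_something(), modeled on the driver's insertion path (i915_gem_gtt_insert()); the my_ helper name, the flags value and the single retry are assumptions, and the snippet assumes the driver's usual local headers. The caller already holds vm->mutex, tries a plain drm_mm insertion first, and falls back to eviction only when the address space reports -ENOSPC.

/* Assumes vm->mutex is held by the caller, as the evictor asserts. */
static int my_insert_or_evict(struct i915_address_space *vm,
			      struct i915_gem_ww_ctx *ww,
			      struct drm_mm_node *node,
			      u64 size, u64 alignment, unsigned long color,
			      u64 start, u64 end)
{
	int err;

	err = drm_mm_insert_node_in_range(&vm->mm, node, size, alignment,
					  color, start, end,
					  DRM_MM_INSERT_BEST);
	if (err != -ENOSPC)
		return err;

	/* Evict vmas in rough LRU order until a matching hole can be made. */
	err = i915_gem_evict_something(vm, ww, size, alignment, color,
				       start, end, 0 /* flags: assumption */);
	if (err)
		return err;

	/* Prefer the hole the eviction just created. */
	return drm_mm_insert_node_in_range(&vm->mm, node, size, alignment,
					   color, start, end,
					   DRM_MM_INSERT_EVICT);
}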
317 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
318 * @vm: address space to evict from
325 * To clarify: This is for freeing up virtual address space, not for freeing
335 u64 start = target->start; in i915_gem_evict_for_node()
336 u64 end = start + target->size; in i915_gem_evict_for_node()
340 lockdep_assert_held(&vm->mutex); in i915_gem_evict_for_node()
356 list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) in i915_gem_evict_for_node()
359 intel_gt_retire_requests(vm->gt); in i915_gem_evict_for_node()
365 start -= I915_GTT_PAGE_SIZE; in i915_gem_evict_for_node()
367 /* Always look at the page afterwards to avoid the end-of-GTT */ in i915_gem_evict_for_node()
372 drm_mm_for_each_node_in_range(node, &vm->mm, start, end) { in i915_gem_evict_for_node()
373 /* If we find any non-objects (!vma), we cannot evict them */ in i915_gem_evict_for_node()
374 if (node->color == I915_COLOR_UNEVICTABLE) { in i915_gem_evict_for_node()
375 ret = -ENOSPC; in i915_gem_evict_for_node()
384 * different cache domains within the address space, we have in i915_gem_evict_for_node()
390 if (node->start + node->size == target->start) { in i915_gem_evict_for_node()
391 if (node->color == target->color) in i915_gem_evict_for_node()
394 if (node->start == target->start + target->size) { in i915_gem_evict_for_node()
395 if (node->color == target->color) in i915_gem_evict_for_node()
401 ret = -ENOSPC; in i915_gem_evict_for_node()
406 ret = -ENOSPC; in i915_gem_evict_for_node()
411 ret = -ENOSPC; in i915_gem_evict_for_node()
425 list_add(&vma->evict_link, &eviction_list); in i915_gem_evict_for_node()
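A hedged sketch of the fixed-placement path that calls i915_gem_evict_for_node(), modeled on i915_gem_gtt_reserve() (the my_ name and the unchecked flags are assumptions; the driver's local headers are assumed): the target node is filled in with the exact offset, size and color, drm_mm is allowed to fail with -ENOSPC, and the evictor then clears precisely that range before the reservation is retried. vm->mutex is again assumed held.

static int my_reserve_fixed(struct i915_address_space *vm,
			    struct i915_gem_ww_ctx *ww,
			    struct drm_mm_node *node,
			    u64 offset, u64 size, unsigned long color,
			    unsigned int flags)
{
	int err;

	node->start = offset;
	node->size = size;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	/* Clear out whatever overlaps [offset, offset + size), plus any
	 * differently-colored neighbour that would need a guard page. */
	err = i915_gem_evict_for_node(vm, ww, node, flags);
	if (err)
		return err;

	return drm_mm_reserve_node(&vm->mm, node);
}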
440 * i915_gem_evict_vm - Evict all idle vmas from a vm
441 * @vm: Address space to cleanse
446 * then @busy_bo will point to it. -EBUSY is also returned. The caller must drop
447 * the vm->mutex, before trying again to acquire the contended lock. The caller
452 * This is used by the execbuf code as a last-ditch effort to defragment the
453 * address space.
455 * To clarify: This is for freeing up virtual address space, not for freeing
463 lockdep_assert_held(&vm->mutex); in i915_gem_evict_vm()
482 list_for_each_entry(vma, &vm->bound_list, vm_link) { in i915_gem_evict_vm()
491 if (!i915_gem_object_get_rcu(vma->obj) || in i915_gem_evict_vm()
492 (ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) { in i915_gem_evict_vm()
494 list_add(&vma->evict_link, &locked_eviction_list); in i915_gem_evict_vm()
498 if (!i915_gem_object_trylock(vma->obj, ww)) { in i915_gem_evict_vm()
500 *busy_bo = vma->obj; /* holds ref */ in i915_gem_evict_vm()
501 ret = -EBUSY; in i915_gem_evict_vm()
504 i915_gem_object_put(vma->obj); in i915_gem_evict_vm()
509 list_add(&vma->evict_link, &eviction_list); in i915_gem_evict_vm()
520 if (ret != -EINTR) /* "Get me out of here!" */ in i915_gem_evict_vm()
524 i915_gem_object_put(vma->obj); in i915_gem_evict_vm()
531 if (ret != -EINTR) /* "Get me out of here!" */ in i915_gem_evict_vm()
535 i915_gem_object_unlock(vma->obj); in i915_gem_evict_vm()
536 i915_gem_object_put(vma->obj); in i915_gem_evict_vm()
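Finally, a hedged sketch of how an execbuf-style caller might drive i915_gem_evict_vm() under a ww transaction, following the contract in the kerneldoc above (the my_ name and the exact error plumbing are assumptions): on -EBUSY the returned busy_bo carries a reference to the contended object, vm->mutex is dropped, the object is locked under the ww context, and the caller restarts the whole transaction.

static int my_evict_vm_with_retry(struct i915_address_space *vm,
				  struct i915_gem_ww_ctx *ww)
{
	struct drm_i915_gem_object *busy_bo = NULL;
	int err;

	err = mutex_lock_interruptible(&vm->mutex);
	if (err)
		return err;

	err = i915_gem_evict_vm(vm, ww, &busy_bo);
	mutex_unlock(&vm->mutex);

	/* On -EBUSY, i915_gem_evict_vm() handed us a reference to the
	 * object whose lock it could not take without blocking. */
	if (err == -EBUSY && busy_bo) {
		err = i915_gem_object_lock(busy_bo, ww);
		i915_gem_object_put(busy_bo);
		if (!err)
			err = -EAGAIN;	/* ask the caller to restart */
	}

	return err;
}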