Lines Matching +full:sync +full:- +full:mode

 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}
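
Taken together, the functions collected in this file form a small container API: create the object, feed it fences, wait on them, then free it. A minimal usage sketch under kernel-internal assumptions; the fence variable here is hypothetical:

	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);		/* start with an empty fence hash */
	r = amdgpu_sync_fence(&sync, fence);	/* remember a fence to sync to */
	if (!r)
		r = amdgpu_sync_wait(&sync, true);	/* interruptible CPU wait */
	amdgpu_sync_free(&sync);		/* drop all remaining references */
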
/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 */
	ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
	return ring->adev == adev;

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 */
	return s_fence->owner;

/**
 * amdgpu_sync_keep_later - Keep the later fence
 */
/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);
		return true;
	}
	return false;
}
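
Keying the hash on f->context is enough because fences of a single context signal in submission order, so only the latest fence per context has to be remembered. A hedged sketch of the resulting behavior; f1 and f2 are hypothetical fences from the same context, with f2 submitted later:

	r = amdgpu_sync_fence(&sync, f1);	/* allocates an entry keyed by f1->context */
	r = amdgpu_sync_fence(&sync, f2);	/* same context: amdgpu_sync_add_later()
						 * swaps f1 for f2, no second entry */
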
/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @sync: sync object to add fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object.
 */
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
{
	struct amdgpu_sync_entry *e;

	if (amdgpu_sync_add_later(sync, f))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}
/**
 * amdgpu_sync_vm_fence - remember to sync to this VM fence
 *
 * @sync: sync object to add fence to
 * @fence: the VM fence to add
 *
 * Add the fence to the sync object and remember it as VM update.
 */
int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
{
	if (!fence)
		return 0;

	amdgpu_sync_keep_later(&sync->last_vm_update, fence);
	return amdgpu_sync_fence(sync, fence);
}
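
This is meant for the VM page-table update path: a later submission can then pick up the most recent page-table update from sync->last_vm_update. A sketch, with pt_fence a hypothetical fence from a finished page-table update job:

	r = amdgpu_sync_vm_fence(&sync, pt_fence);
	if (r)
		return r;
	/* sync->last_vm_update now holds a reference to the latest VM update */
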
/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @sync: sync object to add fences from reservation object to
 * @mode: how owner affects which fences we sync to
 *
 * Sync to the fences in the reservation object according to the sync mode.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		     struct dma_resv *resv, enum amdgpu_sync_mode mode,
		     void *owner)
{
	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	r = amdgpu_sync_fence(sync, f);

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      dma_resv_held(resv));

		/* Always sync to moves, no matter what */
		r = amdgpu_sync_fence(sync, f);

		/* Never sync to VM updates either. */

		/* Ignore fences depending on the sync mode */
		switch (mode) {

		WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD,
		     "Adding eviction fence to sync obj");
		r = amdgpu_sync_fence(sync, f);

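
A typical caller points this at a buffer object's reservation object and lets the mode pick the relevant fences. A sketch, assuming mode AMDGPU_SYNC_NE_OWNER (only sync to fences from other owners); bo and owner are hypothetical:

	r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
			     AMDGPU_SYNC_NE_OWNER, owner);
	if (r)
		return r;
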
/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
{
	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (dma_fence_is_signaled(f)) {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}

		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (dma_fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}
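
One use of peeking is deciding whether a job still has unmet dependencies, for example whether a pipeline sync has to be emitted before the job may run on a ring. A hedged sketch with a hypothetical helper:

	/* hypothetical helper: true while at least one fence is unsignaled */
	static bool job_has_dependencies(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
	{
		return amdgpu_sync_peek_fence(sync, ring) != NULL;
	}
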
/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 *
 * Gets and removes the next fence from the sync object that is not yet
 * signaled.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	hash_for_each_safe(sync->fences, i, tmp, e, node) {

		f = e->fence;

		hash_del(&e->node);
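
Unlike the peek variant, this transfers ownership of each returned fence to the caller, who has to drop it again with dma_fence_put(). A sketch of the usual drain loop:

	struct dma_fence *f;

	while ((f = amdgpu_sync_get_fence(&sync))) {
		dma_fence_wait(f, false);	/* or schedule against it */
		dma_fence_put(f);		/* reference from get_fence */
	}
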
/**
 * amdgpu_sync_clone - clone a sync object
 *
 * @source: sync object to clone
 * @clone: pointer to destination sync object
 */
	hash_for_each_safe(source->fences, i, tmp, e, node) {
		f = e->fence;
		if (!dma_fence_is_signaled(f)) {
			r = amdgpu_sync_fence(clone, f);
			if (r)
				return r;
		} else {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
		}
	}

	dma_fence_put(clone->last_vm_update);
	clone->last_vm_update = dma_fence_get(source->last_vm_update);
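
Cloning is useful when the collected dependencies have to outlive the original object. A sketch; the destination must be initialized first, and note that signaled fences are pruned from the source as a side effect. The src variable is hypothetical:

	struct amdgpu_sync dst;
	int r;

	amdgpu_sync_create(&dst);
	r = amdgpu_sync_clone(&src, &dst);	/* copies only unsignaled fences */
	if (r)
		amdgpu_sync_free(&dst);
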
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}
/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	dma_fence_put(sync->last_vm_update);
}
/**
 * amdgpu_sync_init - init sync object subsystem
 */
int amdgpu_sync_init(void)
{
	if (!amdgpu_sync_slab)
		return -ENOMEM;
/**
 * amdgpu_sync_fini - fini sync object subsystem
 */
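
The init/fini pair manages the slab cache behind the amdgpu_sync_entry allocations and is called once at module load and unload. A hedged sketch of that pairing; the wrapper names are hypothetical:

	static int __init amdgpu_module_init(void)
	{
		return amdgpu_sync_init();	/* create the entry slab, -ENOMEM on failure */
	}

	static void __exit amdgpu_module_exit(void)
	{
		amdgpu_sync_fini();		/* destroy the slab again */
	}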