Searched refs:fences (Results 1 – 25 of 41) sorted by relevance

/Linux-v5.4/drivers/dma-buf/
sync_file.c
149 struct dma_fence **fences, int num_fences) in sync_file_set_fence() argument
160 sync_file->fence = fences[0]; in sync_file_set_fence()
161 kfree(fences); in sync_file_set_fence()
163 array = dma_fence_array_create(num_fences, fences, in sync_file_set_fence()
182 return array->fences; in get_fences()
189 static void add_fence(struct dma_fence **fences, in add_fence() argument
192 fences[*i] = fence; in add_fence()
214 struct dma_fence **fences, **nfences, **a_fences, **b_fences; in sync_file_merge() local
228 fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); in sync_file_merge()
229 if (!fences) in sync_file_merge()
[all …]
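
The sync_file_set_fence() hits above show a recurring pattern in this listing: a list of fences is collapsed to a single struct dma_fence, either directly (one fence) or by wrapping the list in a dma_fence_array. A minimal sketch of that pattern against the v5.4 API; collapse_fences() is a hypothetical helper, not kernel code:

    #include <linux/dma-fence.h>
    #include <linux/dma-fence-array.h>
    #include <linux/slab.h>

    static struct dma_fence *collapse_fences(struct dma_fence **fences,
                                             int num_fences, u64 context)
    {
            struct dma_fence_array *array;

            if (num_fences == 1) {
                    /* Keep the lone fence and free the now-unused array. */
                    struct dma_fence *fence = fences[0];

                    kfree(fences);
                    return fence;
            }

            /* On success the dma_fence_array takes ownership of @fences
             * and drops each reference when it is released.
             */
            array = dma_fence_array_create(num_fences, fences,
                                           context, 1, false);
            if (!array)
                    return NULL;    /* caller still owns @fences */

            return &array->base;
    }
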
dma-fence-array.c
87 if (dma_fence_add_callback(array->fences[i], &cb[i].cb, in dma_fence_array_enable_signaling()
89 int error = array->fences[i]->error; in dma_fence_array_enable_signaling()
116 dma_fence_put(array->fences[i]); in dma_fence_array_release()
118 kfree(array->fences); in dma_fence_array_release()
151 struct dma_fence **fences, in dma_fence_array_create() argument
171 array->fences = fences; in dma_fence_array_create()
197 if (array->fences[i]->context != context) in dma_fence_match_context()
dma-fence.c
525 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count, in dma_fence_test_signaled_any() argument
531 struct dma_fence *fence = fences[i]; in dma_fence_test_signaled_any()
562 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, in dma_fence_wait_any_timeout() argument
569 if (WARN_ON(!fences || !count || timeout < 0)) in dma_fence_wait_any_timeout()
574 if (dma_fence_is_signaled(fences[i])) { in dma_fence_wait_any_timeout()
590 struct dma_fence *fence = fences[i]; in dma_fence_wait_any_timeout()
608 if (dma_fence_test_signaled_any(fences, count, idx)) in dma_fence_wait_any_timeout()
621 dma_fence_remove_callback(fences[i], &cb[i].base); in dma_fence_wait_any_timeout()
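
dma_fence_wait_any_timeout() above returns the remaining timeout in jiffies, 0 on timeout, or a negative error, and reports which fence signaled through its last argument. A hedged usage sketch (the helper name and 100 ms budget are illustrative):

    #include <linux/dma-fence.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static int wait_for_first_fence(struct dma_fence **fences, u32 count)
    {
            signed long t;
            u32 idx;

            t = dma_fence_wait_any_timeout(fences, count,
                                           true, /* interruptible */
                                           msecs_to_jiffies(100), &idx);
            if (t < 0)
                    return t;               /* e.g. -ERESTARTSYS */
            if (t == 0)
                    return -ETIMEDOUT;

            /* fences[idx] signaled first; t jiffies of budget remain. */
            return 0;
    }
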
st-dma-fence.c
432 struct dma_fence __rcu **fences; member
461 rcu_assign_pointer(t->fences[t->id], f1); in thread_signal_callback()
466 f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]); in thread_signal_callback()
495 rcu_assign_pointer(t->fences[t->id], NULL); in thread_signal_callback()
519 t[i].fences = f; in race_signal_callback()
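
The st-dma-fence selftest races rcu_assign_pointer() against dma_fence_get_rcu_safe(), which re-reads the slot until it holds a reference to whatever fence is currently published. A sketch of that publish/consume pairing (struct and helper names are illustrative):

    #include <linux/dma-fence.h>
    #include <linux/rcupdate.h>

    struct fence_slot {
            struct dma_fence __rcu *fence;
    };

    static void publish_fence(struct fence_slot *slot, struct dma_fence *f)
    {
            rcu_assign_pointer(slot->fence, f);
    }

    static struct dma_fence *snapshot_fence(struct fence_slot *slot)
    {
            /* Returns a referenced fence (or NULL), safe against a
             * concurrent publish_fence() replacing the pointer.
             */
            return dma_fence_get_rcu_safe(&slot->fence);
    }
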
/Linux-v5.4/drivers/gpu/drm/amd/amdgpu/
amdgpu_ids.c
110 struct dma_fence *fence, **fences; in amdgpu_pasid_free_delayed() local
115 r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences); in amdgpu_pasid_free_delayed()
125 fence = fences[0]; in amdgpu_pasid_free_delayed()
126 kfree(fences); in amdgpu_pasid_free_delayed()
131 array = dma_fence_array_create(count, fences, context, in amdgpu_pasid_free_delayed()
134 kfree(fences); in amdgpu_pasid_free_delayed()
204 struct dma_fence **fences; in amdgpu_vmid_grab_idle() local
211 fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL); in amdgpu_vmid_grab_idle()
212 if (!fences) in amdgpu_vmid_grab_idle()
218 fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring); in amdgpu_vmid_grab_idle()
[all …]
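
amdgpu_pasid_free_delayed() above snapshots a reservation object with dma_resv_get_fences_rcu(), which hands back a kmalloc'ed array of referenced fences that the caller must put and kfree (or hand on to dma_fence_array_create(), as amdgpu does). A minimal sketch of that snapshot-and-release contract:

    #include <linux/dma-resv.h>
    #include <linux/slab.h>

    static int snapshot_shared_fences(struct dma_resv *resv)
    {
            struct dma_fence **fences;
            unsigned int count, i;
            int r;

            r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
            if (r)
                    return r;

            for (i = 0; i < count; i++) {
                    /* ... inspect fences[i] ... */
                    dma_fence_put(fences[i]);
            }
            kfree(fences);
            return 0;
    }
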
amdgpu_sync.c
52 hash_init(sync->fences); in amdgpu_sync_create()
136 hash_for_each_possible(sync->fences, e, node, f->context) { in amdgpu_sync_add_later()
177 hash_add(sync->fences, &e->node, f->context); in amdgpu_sync_fence()
265 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_peek_fence()
307 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_get_fence()
340 hash_for_each_safe(source->fences, i, tmp, e, node) { in amdgpu_sync_clone()
365 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_wait()
391 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_free()
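
amdgpu_sync keeps at most one fence per fence context by hashing entries on f->context (see the DECLARE_HASHTABLE(fences, 4) hit under amdgpu_sync.h further down). A sketch of that deduplication, assuming the table was set up with hash_init(); struct and helper names are illustrative:

    #include <linux/dma-fence.h>
    #include <linux/hashtable.h>
    #include <linux/slab.h>

    struct sync_entry {
            struct hlist_node node;
            struct dma_fence *fence;
    };

    struct fence_sync {
            DECLARE_HASHTABLE(fences, 4);   /* 16 buckets, keyed by context */
    };

    static int sync_add_fence(struct fence_sync *sync, struct dma_fence *f)
    {
            struct sync_entry *e;

            /* A newer fence from the same context replaces the old one. */
            hash_for_each_possible(sync->fences, e, node, f->context) {
                    if (e->fence->context != f->context)
                            continue;
                    if (dma_fence_is_later(f, e->fence)) {
                            dma_fence_put(e->fence);
                            e->fence = dma_fence_get(f);
                    }
                    return 0;
            }

            e = kmalloc(sizeof(*e), GFP_KERNEL);
            if (!e)
                    return -ENOMEM;
            e->fence = dma_fence_get(f);
            hash_add(sync->fences, &e->node, f->context);
            return 0;
    }
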
amdgpu_sa.c
207 struct dma_fence **fences, in amdgpu_sa_bo_next_hole() argument
229 fences[i] = NULL; in amdgpu_sa_bo_next_hole()
238 fences[i] = sa_bo->fence; in amdgpu_sa_bo_next_hole()
279 struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS]; in amdgpu_sa_bo_new() local
314 } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries)); in amdgpu_sa_bo_new()
317 if (fences[i]) in amdgpu_sa_bo_new()
318 fences[count++] = dma_fence_get(fences[i]); in amdgpu_sa_bo_new()
322 t = dma_fence_wait_any_timeout(fences, count, false, in amdgpu_sa_bo_new()
326 dma_fence_put(fences[i]); in amdgpu_sa_bo_new()
amdgpu_dma_buf.c
142 struct dma_fence **fences; in __dma_resv_make_exclusive() local
149 r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences); in __dma_resv_make_exclusive()
156 dma_resv_add_excl_fence(obj, fences[0]); in __dma_resv_make_exclusive()
157 dma_fence_put(fences[0]); in __dma_resv_make_exclusive()
158 kfree(fences); in __dma_resv_make_exclusive()
162 array = dma_fence_array_create(count, fences, in __dma_resv_make_exclusive()
176 dma_fence_put(fences[count]); in __dma_resv_make_exclusive()
177 kfree(fences); in __dma_resv_make_exclusive()
amdgpu_ctx.c
90 ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities, in amdgpu_ctx_init()
92 if (!ctx->fences) in amdgpu_ctx_init()
107 entity->fences = &ctx->fences[amdgpu_sched_jobs * i]; in amdgpu_ctx_init()
202 kfree(ctx->fences); in amdgpu_ctx_init()
203 ctx->fences = NULL; in amdgpu_ctx_init()
219 dma_fence_put(ctx->entities[0][i].fences[j]); in amdgpu_ctx_fini()
220 kfree(ctx->fences); in amdgpu_ctx_fini()
476 other = centity->fences[idx]; in amdgpu_ctx_add_fence()
483 centity->fences[idx] = fence; in amdgpu_ctx_add_fence()
515 fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]); in amdgpu_ctx_get_fence()
[all …]
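
amdgpu_ctx indexes its per-entity fence storage with seq & (amdgpu_sched_jobs - 1), and amdgpu_fence.c below does the same with num_fences_mask: a power-of-two ring where the mask maps an ever-growing sequence number onto a slot. A sketch of that ring (locking omitted, as amdgpu serializes callers externally; names are illustrative):

    #include <linux/dma-fence.h>

    #define FENCE_RING_SIZE 32      /* must be a power of two */

    struct fence_ring {
            struct dma_fence *fences[FENCE_RING_SIZE];
            u64 seq;                /* next sequence number to hand out */
    };

    static u64 fence_ring_add(struct fence_ring *ring, struct dma_fence *f)
    {
            u64 seq = ring->seq++;
            unsigned int idx = seq & (FENCE_RING_SIZE - 1);

            /* Drop the reference on whatever the slot held before;
             * dma_fence_put(NULL) is a no-op.
             */
            dma_fence_put(ring->fences[idx]);
            ring->fences[idx] = dma_fence_get(f);
            return seq;
    }
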
amdgpu_fence.c
158 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_emit()
261 ptr = &drv->fences[last_seq]; in amdgpu_fence_process()
316 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_wait_empty()
453 ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *), in amdgpu_fence_driver_init_ring()
455 if (!ring->fence_drv.fences) in amdgpu_fence_driver_init_ring()
547 dma_fence_put(ring->fence_drv.fences[j]); in amdgpu_fence_driver_fini()
548 kfree(ring->fence_drv.fences); in amdgpu_fence_driver_fini()
549 ring->fence_drv.fences = NULL; in amdgpu_fence_driver_fini()
amdgpu_debugfs.c
928 struct dma_fence **fences) in amdgpu_ib_preempt_fences_swap() argument
944 ptr = &drv->fences[last_seq]; in amdgpu_ib_preempt_fences_swap()
952 fences[last_seq] = fence; in amdgpu_ib_preempt_fences_swap()
957 static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences, in amdgpu_ib_preempt_signal_fences() argument
964 fence = fences[i]; in amdgpu_ib_preempt_signal_fences()
1002 ptr = &drv->fences[preempt_seq]; in amdgpu_ib_preempt_mark_partial_job()
1019 struct dma_fence **fences = NULL; in amdgpu_debugfs_ib_preempt() local
1035 fences = kcalloc(length, sizeof(void *), GFP_KERNEL); in amdgpu_debugfs_ib_preempt()
1036 if (!fences) in amdgpu_debugfs_ib_preempt()
1060 amdgpu_ib_preempt_fences_swap(ring, fences); in amdgpu_debugfs_ib_preempt()
[all …]
amdgpu_ctx.h
34 struct dma_fence **fences; member
45 struct dma_fence **fences; member
amdgpu_cs.c
1560 struct drm_amdgpu_fence *fences) in amdgpu_cs_wait_all_fences() argument
1570 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]); in amdgpu_cs_wait_all_fences()
1605 struct drm_amdgpu_fence *fences) in amdgpu_cs_wait_any_fence() argument
1623 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]); in amdgpu_cs_wait_any_fence()
1673 struct drm_amdgpu_fence *fences; in amdgpu_cs_wait_fences_ioctl() local
1677 fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence), in amdgpu_cs_wait_fences_ioctl()
1679 if (fences == NULL) in amdgpu_cs_wait_fences_ioctl()
1682 fences_user = u64_to_user_ptr(wait->in.fences); in amdgpu_cs_wait_fences_ioctl()
1683 if (copy_from_user(fences, fences_user, in amdgpu_cs_wait_fences_ioctl()
1690 r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences); in amdgpu_cs_wait_fences_ioctl()
[all …]
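
amdgpu_cs_wait_fences_ioctl() above pulls a user-supplied fence array into the kernel before waiting on it. A sketch of that copy-in step with the usual kmalloc_array() + copy_from_user() guards (the helper name is illustrative):

    #include <drm/amdgpu_drm.h>
    #include <linux/err.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>

    static struct drm_amdgpu_fence *copy_fences_from_user(u64 user_ptr,
                                                          u32 fence_count)
    {
            struct drm_amdgpu_fence *fences;

            fences = kmalloc_array(fence_count, sizeof(*fences), GFP_KERNEL);
            if (!fences)
                    return ERR_PTR(-ENOMEM);

            if (copy_from_user(fences, u64_to_user_ptr(user_ptr),
                               sizeof(*fences) * fence_count)) {
                    kfree(fences);
                    return ERR_PTR(-EFAULT);
            }
            return fences;
    }
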
amdgpu_sync.h
38 DECLARE_HASHTABLE(fences, 4);
amdgpu_vcn.c
291 unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0}; in amdgpu_vcn_idle_work_handler() local
319 fences += fence[j]; in amdgpu_vcn_idle_work_handler()
322 if (fences == 0) { in amdgpu_vcn_idle_work_handler()
350 unsigned int fences = 0; in amdgpu_vcn_ring_begin_use() local
354 fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]); in amdgpu_vcn_ring_begin_use()
356 if (fences) in amdgpu_vcn_ring_begin_use()
amdgpu_trace.h
149 __field(u32, fences)
156 __entry->fences = amdgpu_fence_count_emitted(
161 __entry->fences)
/Linux-v5.4/Documentation/driver-api/
sync_file.rst
9 the fences(struct dma_fence) that are needed to synchronize between drivers or
29 in-fences and out-fences
33 the driver to userspace we call the fences it contains 'out-fences'. They are
37 Out-fences are fences that the driver creates.
40 userspace we call these fence(s) 'in-fences'. Receiving in-fences means that
42 the in-fences.
72 of the Sync File to the kernel. The kernel can then retrieve the fences
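
From userspace, two sync files (and hence their in-fences) can be combined with the SYNC_IOC_MERGE ioctl from the sync_file uapi. A hedged userspace sketch, assuming only the <linux/sync_file.h> definitions:

    #include <linux/sync_file.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Returns a new fd whose sync_file contains the fences of both
     * inputs, or -1 on error.
     */
    int merge_sync_files(int fd1, int fd2)
    {
            struct sync_merge_data data;

            memset(&data, 0, sizeof(data));
            strncpy(data.name, "merged", sizeof(data.name) - 1);
            data.fd2 = fd2;

            if (ioctl(fd1, SYNC_IOC_MERGE, &data) < 0)
                    return -1;

            return data.fence;      /* fd for the merged sync_file */
    }
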
/Linux-v5.4/drivers/gpu/drm/i915/selftests/
i915_sw_fence.c
453 struct i915_sw_fence **fences; in test_chain() local
457 fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL); in test_chain()
458 if (!fences) in test_chain()
462 fences[i] = alloc_fence(); in test_chain()
463 if (!fences[i]) { in test_chain()
470 ret = i915_sw_fence_await_sw_fence_gfp(fences[i], in test_chain()
471 fences[i - 1], in test_chain()
478 i915_sw_fence_commit(fences[i]); in test_chain()
484 if (i915_sw_fence_done(fences[i])) { in test_chain()
490 i915_sw_fence_commit(fences[0]); in test_chain()
[all …]
/Linux-v5.4/drivers/gpu/drm/radeon/
radeon_sa.c
248 struct radeon_fence **fences, in radeon_sa_bo_next_hole() argument
278 fences[i] = sa_bo->fence; in radeon_sa_bo_next_hole()
317 struct radeon_fence *fences[RADEON_NUM_RINGS]; in radeon_sa_bo_new() local
336 fences[i] = NULL; in radeon_sa_bo_new()
350 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); in radeon_sa_bo_new()
353 radeon_fence_ref(fences[i]); in radeon_sa_bo_new()
356 r = radeon_fence_wait_any(rdev, fences, false); in radeon_sa_bo_new()
358 radeon_fence_unref(&fences[i]); in radeon_sa_bo_new()
radeon_trace.h
36 __field(u32, fences)
42 __entry->fences = radeon_fence_count_emitted(
47 __entry->fences)
/Linux-v5.4/include/linux/
dma-fence-array.h
43 struct dma_fence **fences; member
78 struct dma_fence **fences,
/Linux-v5.4/drivers/gpu/drm/i915/gem/
i915_gem_execbuffer.c
2345 __free_fence_array(struct drm_syncobj **fences, unsigned int n) in __free_fence_array() argument
2348 drm_syncobj_put(ptr_mask_bits(fences[n], 2)); in __free_fence_array()
2349 kvfree(fences); in __free_fence_array()
2358 struct drm_syncobj **fences; in get_fence_array() local
2369 SIZE_MAX / sizeof(*fences))) in get_fence_array()
2376 fences = kvmalloc_array(nfences, sizeof(*fences), in get_fence_array()
2378 if (!fences) in get_fence_array()
2405 fences[n] = ptr_pack_bits(syncobj, fence.flags, 2); in get_fence_array()
2408 return fences; in get_fence_array()
2411 __free_fence_array(fences, n); in get_fence_array()
[all …]
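
get_fence_array() above stores per-fence flags in the low two bits of each drm_syncobj pointer via i915's ptr_pack_bits()/ptr_mask_bits() helpers, which rely on the pointers being at least 4-byte aligned. A generic illustration of the trick (not the i915 helpers themselves):

    static inline void *pack_low_bits(void *ptr, unsigned long bits)
    {
            /* Assumes @ptr is at least 4-byte aligned. */
            return (void *)((unsigned long)ptr | (bits & 3));
    }

    static inline void *unpack_ptr(void *packed)
    {
            return (void *)((unsigned long)packed & ~3UL);
    }

    static inline unsigned long unpack_bits(void *packed)
    {
            return (unsigned long)packed & 3;
    }
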
/Linux-v5.4/drivers/gpu/drm/virtio/
virtgpu_fence.c
98 list_add_tail(&fence->node, &drv->fences); in virtio_gpu_fence_emit()
116 list_for_each_entry_safe(fence, tmp, &drv->fences, node) { in virtio_gpu_fence_event_process()
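
virtgpu queues each emitted fence on drv->fences and later walks the list with list_for_each_entry_safe() so completed entries can be unlinked mid-iteration. A loose sketch of that pending-list processing (field and type names are illustrative, not the virtio-gpu structs):

    #include <linux/dma-fence.h>
    #include <linux/list.h>

    struct pending_fence {
            struct dma_fence f;
            u64 seq;
            struct list_head node;
    };

    /* Signal and drop every queued fence up to @last_seq. */
    static void signal_completed(struct list_head *fences, u64 last_seq)
    {
            struct pending_fence *fence, *tmp;

            list_for_each_entry_safe(fence, tmp, fences, node) {
                    if (fence->seq > last_seq)
                            continue;
                    dma_fence_signal(&fence->f);
                    list_del(&fence->node);
                    dma_fence_put(&fence->f);
            }
    }
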
/Linux-v5.4/drivers/gpu/drm/
drm_gem.c
1411 struct dma_fence **fences; in drm_gem_fence_array_add_implicit() local
1422 &fence_count, &fences); in drm_gem_fence_array_add_implicit()
1427 ret = drm_gem_fence_array_add(fence_array, fences[i]); in drm_gem_fence_array_add_implicit()
1433 dma_fence_put(fences[i]); in drm_gem_fence_array_add_implicit()
1434 kfree(fences); in drm_gem_fence_array_add_implicit()
/Linux-v5.4/tools/memory-model/Documentation/
explanation.txt
300 fences), such as calls to smp_rmb() or rcu_read_lock().
733 only internal operations. However, loads, stores, and fences involve
773 about the fence. However, fences do constrain the way CPUs and the
780 Strong fences, including smp_mb() and synchronize_rcu(), force
790 Acquire fences, such as smp_load_acquire(), force the CPU to
795 Release fences, such as smp_store_release(), force the CPU to
816 The propagation ordering enforced by release fences and strong fences
819 fence. We describe this property by saying that release fences and
820 strong fences are A-cumulative. By contrast, smp_wmb() fences are not
825 rcu_read_lock(), rcu_read_unlock(), and synchronize_rcu() fences have
[all …]
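
The release/acquire fences explanation.txt describes are easiest to see in the classic message-passing shape: the release store orders the data write before the flag, and the acquire load orders the flag check before the data read. A minimal sketch:

    #include <asm/barrier.h>

    static int data;
    static int flag;

    static void producer(void)
    {
            data = 42;                      /* plain store */
            smp_store_release(&flag, 1);    /* data visible before flag */
    }

    static int consumer(void)
    {
            if (smp_load_acquire(&flag))    /* flag read before data */
                    return data;            /* guaranteed to see 42 */
            return -1;                      /* not ready yet */
    }
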