/Linux-v4.19/drivers/dma-buf/ |
D | sync_file.c |
    158  struct dma_fence **fences, int num_fences)  in sync_file_set_fence() argument
    169  sync_file->fence = fences[0];  in sync_file_set_fence()
    170  kfree(fences);  in sync_file_set_fence()
    172  array = dma_fence_array_create(num_fences, fences,  in sync_file_set_fence()
    191  return array->fences;  in get_fences()
    198  static void add_fence(struct dma_fence **fences,  in add_fence() argument
    201  fences[*i] = fence;  in add_fence()
    223  struct dma_fence **fences, **nfences, **a_fences, **b_fences;  in sync_file_merge() local
    237  fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);  in sync_file_merge()
    238  if (!fences)  in sync_file_merge()
    [all …]
|
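The sync_file.c hits above capture a pattern worth noting: a sync_file holding a single fence stores it directly and frees the array, while two or more fences are wrapped in a dma_fence_array that signals only once all of them have signaled. A condensed sketch of that pattern follows; the function name is hypothetical and the real kernel function handles further error paths not shown here.

    #include <linux/slab.h>
    #include <linux/sync_file.h>
    #include <linux/dma-fence-array.h>

    /* Condensed sketch, not the verbatim kernel function. */
    static int example_set_fence(struct sync_file *sync_file,
                                 struct dma_fence **fences, int num_fences)
    {
            struct dma_fence_array *array;

            if (num_fences == 1) {
                    /* one fence: take it as-is, no array wrapper needed */
                    sync_file->fence = fences[0];
                    kfree(fences);
                    return 0;
            }

            /* several fences: wrap them so they signal as a single fence */
            array = dma_fence_array_create(num_fences, fences,
                                           dma_fence_context_alloc(1),
                                           1, false);
            if (!array)
                    return -ENOMEM;

            sync_file->fence = &array->base;
            return 0;
    }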
D | dma-fence-array.c |
    72   if (dma_fence_add_callback(array->fences[i], &cb[i].cb,  in dma_fence_array_enable_signaling()
    96   dma_fence_put(array->fences[i]);  in dma_fence_array_release()
    98   kfree(array->fences);  in dma_fence_array_release()
    131  struct dma_fence **fences,  in dma_fence_array_create() argument
    151  array->fences = fences;  in dma_fence_array_create()
    175  if (array->fences[i]->context != context)  in dma_fence_match_context()
|
D | dma-fence.c |
    496  dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,  in dma_fence_test_signaled_any() argument
    502  struct dma_fence *fence = fences[i];  in dma_fence_test_signaled_any()
    533  dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,  in dma_fence_wait_any_timeout() argument
    540  if (WARN_ON(!fences || !count || timeout < 0))  in dma_fence_wait_any_timeout()
    545  if (dma_fence_is_signaled(fences[i])) {  in dma_fence_wait_any_timeout()
    561  struct dma_fence *fence = fences[i];  in dma_fence_wait_any_timeout()
    579  if (dma_fence_test_signaled_any(fences, count, idx))  in dma_fence_wait_any_timeout()
    592  dma_fence_remove_callback(fences[i], &cb[i].base);  in dma_fence_wait_any_timeout()
|
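dma_fence_wait_any_timeout() returns a negative error code, zero on timeout, or the remaining jiffies on success, and reports which fence signaled first through its last argument. A minimal hypothetical caller, assuming an already-populated fence array:

    #include <linux/jiffies.h>
    #include <linux/printk.h>
    #include <linux/dma-fence.h>

    /* Hypothetical caller: wait up to one second for any fence to signal. */
    static int example_wait_any(struct dma_fence **fences, uint32_t count)
    {
            uint32_t idx;
            signed long t;

            t = dma_fence_wait_any_timeout(fences, count, true /* interruptible */,
                                           msecs_to_jiffies(1000), &idx);
            if (t < 0)
                    return t;               /* interrupted or invalid arguments */
            if (t == 0)
                    return -ETIMEDOUT;      /* nothing signaled within a second */

            pr_info("fence %u signaled first\n", idx);
            return 0;
    }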
D | sync_debug.c |
    144  sync_print_fence(s, array->fences[i], true);  in sync_print_sync_file()
|
/Linux-v4.19/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_ids.c |
    110  struct dma_fence *fence, **fences;  in amdgpu_pasid_free_delayed() local
    115  r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);  in amdgpu_pasid_free_delayed()
    125  fence = fences[0];  in amdgpu_pasid_free_delayed()
    126  kfree(fences);  in amdgpu_pasid_free_delayed()
    131  array = dma_fence_array_create(count, fences, context,  in amdgpu_pasid_free_delayed()
    134  kfree(fences);  in amdgpu_pasid_free_delayed()
    204  struct dma_fence **fences;  in amdgpu_vmid_grab_idle() local
    211  fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);  in amdgpu_vmid_grab_idle()
    212  if (!fences)  in amdgpu_vmid_grab_idle()
    218  fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);  in amdgpu_vmid_grab_idle()
    [all …]
|
D | amdgpu_sync.c |
    53   hash_init(sync->fences);  in amdgpu_sync_create()
    137  hash_for_each_possible(sync->fences, e, node, f->context) {  in amdgpu_sync_add_later()
    178  hash_add(sync->fences, &e->node, f->context);  in amdgpu_sync_fence()
    266  hash_for_each_safe(sync->fences, i, tmp, e, node) {  in amdgpu_sync_peek_fence()
    308  hash_for_each_safe(sync->fences, i, tmp, e, node) {  in amdgpu_sync_get_fence()
    341  hash_for_each_safe(source->fences, i, tmp, e, node) {  in amdgpu_sync_clone()
    366  hash_for_each_safe(sync->fences, i, tmp, e, node) {  in amdgpu_sync_wait()
    392  hash_for_each_safe(sync->fences, i, tmp, e, node) {  in amdgpu_sync_free()
|
D | amdgpu_sa.c |
    207  struct dma_fence **fences,  in amdgpu_sa_bo_next_hole() argument
    236  fences[i] = sa_bo->fence;  in amdgpu_sa_bo_next_hole()
    277  struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];  in amdgpu_sa_bo_new() local
    300  fences[i] = NULL;  in amdgpu_sa_bo_new()
    314  } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));  in amdgpu_sa_bo_new()
    317  if (fences[i])  in amdgpu_sa_bo_new()
    318  fences[count++] = dma_fence_get(fences[i]);  in amdgpu_sa_bo_new()
    322  t = dma_fence_wait_any_timeout(fences, count, false,  in amdgpu_sa_bo_new()
    326  dma_fence_put(fences[i]);  in amdgpu_sa_bo_new()
|
D | amdgpu_ctx.c |
    65   ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,  in amdgpu_ctx_init()
    67   if (!ctx->fences)  in amdgpu_ctx_init()
    74   ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];  in amdgpu_ctx_init()
    108  kfree(ctx->fences);  in amdgpu_ctx_init()
    109  ctx->fences = NULL;  in amdgpu_ctx_init()
    124  dma_fence_put(ctx->rings[i].fences[j]);  in amdgpu_ctx_fini()
    125  kfree(ctx->fences);  in amdgpu_ctx_fini()
    126  ctx->fences = NULL;  in amdgpu_ctx_fini()
    346  other = cring->fences[idx];  in amdgpu_ctx_add_fence()
    353  cring->fences[idx] = fence;  in amdgpu_ctx_add_fence()
    [all …]
|
D | amdgpu_fence.c |
    155  ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];  in amdgpu_fence_emit()
    246  ptr = &drv->fences[last_seq];  in amdgpu_fence_process()
    298  ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];  in amdgpu_fence_wait_empty()
    431  ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),  in amdgpu_fence_driver_init_ring()
    433  if (!ring->fence_drv.fences)  in amdgpu_fence_driver_init_ring()
    506  dma_fence_put(ring->fence_drv.fences[j]);  in amdgpu_fence_driver_fini()
    507  kfree(ring->fence_drv.fences);  in amdgpu_fence_driver_fini()
    508  ring->fence_drv.fences = NULL;  in amdgpu_fence_driver_fini()
|
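amdgpu_fence.c indexes its fence slots with "seq & num_fences_mask", which only works because the array size (num_hw_submission * 2) is kept a power of two. A simplified sketch of that ring-buffer scheme, with hypothetical names and without the RCU protection the real driver adds:

    #include <linux/slab.h>
    #include <linux/dma-fence.h>

    /* Hypothetical mirror of the fence_drv layout seen above. */
    struct example_fence_drv {
            struct dma_fence **fences;      /* one slot per in-flight sequence number */
            u32 num_fences_mask;            /* size - 1, valid only for power-of-two sizes */
    };

    static int example_init(struct example_fence_drv *drv, u32 num_hw_submission)
    {
            /* twice the hardware queue depth; must stay a power of two */
            drv->fences = kcalloc(num_hw_submission * 2, sizeof(void *),
                                  GFP_KERNEL);
            if (!drv->fences)
                    return -ENOMEM;
            drv->num_fences_mask = num_hw_submission * 2 - 1;
            return 0;
    }

    static void example_store(struct example_fence_drv *drv, u64 seq,
                              struct dma_fence *fence)
    {
            /* "seq & mask" wraps around the buffer without a division */
            struct dma_fence **ptr = &drv->fences[seq & drv->num_fences_mask];

            dma_fence_put(*ptr);            /* drop whatever used this slot before (NULL is fine) */
            *ptr = dma_fence_get(fence);
    }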
D | amdgpu_cs.c |
    1487  struct drm_amdgpu_fence *fences)  in amdgpu_cs_wait_all_fences() argument
    1497  fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);  in amdgpu_cs_wait_all_fences()
    1532  struct drm_amdgpu_fence *fences)  in amdgpu_cs_wait_any_fence() argument
    1550  fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);  in amdgpu_cs_wait_any_fence()
    1600  struct drm_amdgpu_fence *fences;  in amdgpu_cs_wait_fences_ioctl() local
    1604  fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),  in amdgpu_cs_wait_fences_ioctl()
    1606  if (fences == NULL)  in amdgpu_cs_wait_fences_ioctl()
    1609  fences_user = u64_to_user_ptr(wait->in.fences);  in amdgpu_cs_wait_fences_ioctl()
    1610  if (copy_from_user(fences, fences_user,  in amdgpu_cs_wait_fences_ioctl()
    1617  r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);  in amdgpu_cs_wait_fences_ioctl()
    [all …]
|
D | amdgpu_sync.h |
    38  DECLARE_HASHTABLE(fences, 4);
|
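The DECLARE_HASHTABLE(fences, 4) above is the heart of amdgpu_sync: fences are hashed by their context so that at most one fence per context is tracked. A rough sketch of the add path, with illustrative names; the real amdgpu_sync_add_later() additionally checks dma_fence_is_later() before replacing an entry, which is omitted here.

    #include <linux/slab.h>
    #include <linux/hashtable.h>
    #include <linux/dma-fence.h>

    struct example_sync_entry {
            struct hlist_node node;
            struct dma_fence *fence;
    };

    struct example_sync {
            DECLARE_HASHTABLE(fences, 4);   /* 2^4 = 16 buckets */
    };

    static void example_sync_init(struct example_sync *sync)
    {
            hash_init(sync->fences);
    }

    static int example_sync_add(struct example_sync *sync, struct dma_fence *f)
    {
            struct example_sync_entry *e;

            /* a fence from the same context replaces the one already tracked */
            hash_for_each_possible(sync->fences, e, node, f->context) {
                    if (e->fence->context == f->context) {
                            dma_fence_put(e->fence);
                            e->fence = dma_fence_get(f);
                            return 0;
                    }
            }

            e = kmalloc(sizeof(*e), GFP_KERNEL);
            if (!e)
                    return -ENOMEM;
            e->fence = dma_fence_get(f);
            hash_add(sync->fences, &e->node, f->context);
            return 0;
    }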
D | amdgpu_vcn.c |
    210  unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);  in amdgpu_vcn_idle_work_handler() local
    214  fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);  in amdgpu_vcn_idle_work_handler()
    217  fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);  in amdgpu_vcn_idle_work_handler()
    219  if (fences == 0) {  in amdgpu_vcn_idle_work_handler()
|
D | amdgpu_trace.h |
    148  __field(u32, fences)
    155  __entry->fences = amdgpu_fence_count_emitted(
    160  __entry->fences)
|
D | amdgpu_uvd.c |
    1180  unsigned fences = 0, i, j;  in amdgpu_uvd_idle_work_handler() local
    1185  fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);  in amdgpu_uvd_idle_work_handler()
    1187  fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);  in amdgpu_uvd_idle_work_handler()
    1191  if (fences == 0) {  in amdgpu_uvd_idle_work_handler()
|
/Linux-v4.19/Documentation/ |
D | sync_file.txt |
    9   the fences(struct dma_fence) that are needed to synchronize between drivers or
    29  in-fences and out-fences
    33  the driver to userspace we call the fences it contains 'out-fences'. They are
    37  Out-fences are fences that the driver creates.
    40  userspace we call these fence(s) 'in-fences'. Receiving in-fences means that
    42  the in-fences.
    72  of the Sync File to the kernel. The kernel can then retrieve the fences
|
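The sync_file.txt lines above describe both directions of fence passing. Below is a hedged kernel-side sketch of what a driver ioctl might do with them; the in_fd and out_fence arguments are hypothetical, while sync_file_get_fence() and sync_file_create() are the real helpers the documentation refers to.

    #include <linux/file.h>
    #include <linux/dma-fence.h>
    #include <linux/sync_file.h>

    /* Hypothetical ioctl body: consume an in-fence fd, publish an out-fence. */
    static int example_fence_ioctl(int in_fd, struct dma_fence *out_fence)
    {
            struct dma_fence *in_fence;
            struct sync_file *sync_file;
            int fd;

            /* in-fence: userspace handed us an fd, recover the fence behind it */
            in_fence = sync_file_get_fence(in_fd);
            if (!in_fence)
                    return -EINVAL;
            /* ... make the submitted job wait on in_fence before it runs ... */
            dma_fence_put(in_fence);

            /* out-fence: wrap our fence in a sync_file and hand back an fd */
            sync_file = sync_file_create(out_fence);
            if (!sync_file)
                    return -ENOMEM;

            fd = get_unused_fd_flags(O_CLOEXEC);
            if (fd < 0) {
                    fput(sync_file->file);
                    return fd;
            }
            fd_install(fd, sync_file->file);
            return fd;
    }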
/Linux-v4.19/drivers/gpu/drm/i915/selftests/ |
D | i915_sw_fence.c |
    453  struct i915_sw_fence **fences;  in test_chain() local
    457  fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL);  in test_chain()
    458  if (!fences)  in test_chain()
    462  fences[i] = alloc_fence();  in test_chain()
    463  if (!fences[i]) {  in test_chain()
    470  ret = i915_sw_fence_await_sw_fence_gfp(fences[i],  in test_chain()
    471  fences[i - 1],  in test_chain()
    478  i915_sw_fence_commit(fences[i]);  in test_chain()
    484  if (i915_sw_fence_done(fences[i])) {  in test_chain()
    490  i915_sw_fence_commit(fences[0]);  in test_chain()
    [all …]
|
/Linux-v4.19/drivers/gpu/drm/radeon/ |
D | radeon_sa.c |
    248  struct radeon_fence **fences,  in radeon_sa_bo_next_hole() argument
    278  fences[i] = sa_bo->fence;  in radeon_sa_bo_next_hole()
    317  struct radeon_fence *fences[RADEON_NUM_RINGS];  in radeon_sa_bo_new() local
    336  fences[i] = NULL;  in radeon_sa_bo_new()
    350  } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));  in radeon_sa_bo_new()
    353  radeon_fence_ref(fences[i]);  in radeon_sa_bo_new()
    356  r = radeon_fence_wait_any(rdev, fences, false);  in radeon_sa_bo_new()
    358  radeon_fence_unref(&fences[i]);  in radeon_sa_bo_new()
|
D | radeon_trace.h |
    36  __field(u32, fences)
    42  __entry->fences = radeon_fence_count_emitted(
    47  __entry->fences)
|
D | radeon_fence.c |
    606  struct radeon_fence **fences,  in radeon_fence_wait_any() argument
    616  if (!fences[i]) {  in radeon_fence_wait_any()
    620  seq[i] = fences[i]->seq;  in radeon_fence_wait_any()
|
/Linux-v4.19/include/linux/ |
D | dma-fence-array.h |
    50  struct dma_fence **fences;  member
    85  struct dma_fence **fences,
|
D | dma-fence.h |
    511  signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
|
/Linux-v4.19/drivers/gpu/drm/i915/ |
D | i915_gem_execbuffer.c |
    2045  __free_fence_array(struct drm_syncobj **fences, unsigned int n)  in __free_fence_array() argument
    2048  drm_syncobj_put(ptr_mask_bits(fences[n], 2));  in __free_fence_array()
    2049  kvfree(fences);  in __free_fence_array()
    2058  struct drm_syncobj **fences;  in get_fence_array() local
    2069  SIZE_MAX / sizeof(*fences)))  in get_fence_array()
    2076  fences = kvmalloc_array(nfences, sizeof(*fences),  in get_fence_array()
    2078  if (!fences)  in get_fence_array()
    2105  fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);  in get_fence_array()
    2108  return fences;  in get_fence_array()
    2111  __free_fence_array(fences, n);  in get_fence_array()
    [all …]
|
/Linux-v4.19/drivers/gpu/drm/virtio/ |
D | virtgpu_fence.c |
    87   list_add_tail(&(*fence)->node, &drv->fences);  in virtio_gpu_fence_emit()
    104  list_for_each_entry_safe(fence, tmp, &drv->fences, node) {  in virtio_gpu_fence_event_process()
|
/Linux-v4.19/tools/memory-model/ |
D | linux-kernel.cat |
    67  (* Propagation: Ordering from release operations and strong fences. *)
    74  * No fences needed here for prop because relation confined to one process.
|
/Linux-v4.19/tools/memory-model/Documentation/ |
D | explanation.txt |
    298  fences), such as calls to smp_rmb() or rcu_read_lock().
    737  only internal operations. However, loads, stores, and fences involve
    777  about the fence. However, fences do constrain the way CPUs and the
    784  Strong fences, including smp_mb() and synchronize_rcu(), force
    794  Acquire fences, such as smp_load_acquire(), force the CPU to
    799  Release fences, such as smp_store_release(), force the CPU to
    820  The propagation ordering enforced by release fences and strong fences
    823  fence. We describe this property by saying that release fences and
    824  strong fences are A-cumulative. By contrast, smp_wmb() fences are not
    829  rcu_read_lock(), rcu_read_unlock(), and synchronize_rcu() fences have
    [all …]
|
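The distinction explanation.txt draws between acquire, release, and strong fences is easiest to see in the classic message-passing pattern. A minimal sketch using the primitives the text names:

    #include <asm/barrier.h>

    static int data;
    static int flag;

    /* Writer: the release fence orders the store to data before the store to flag. */
    static void writer(void)
    {
            data = 42;
            smp_store_release(&flag, 1);
    }

    /* Reader: the acquire fence orders the load of flag before the load of data. */
    static int reader(void)
    {
            if (smp_load_acquire(&flag))
                    return data;    /* must observe 42, never the old value */
            return -1;              /* flag not set yet; try again later */
    }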