/Linux-v6.1/drivers/gpu/drm/i915/

i915_deps.c
      38  if (deps->fences != &deps->single)   in i915_deps_reset_fences()
      39  kfree(deps->fences);   in i915_deps_reset_fences()
      42  deps->fences = &deps->single;   in i915_deps_reset_fences()
      52  deps->fences = NULL;   in i915_deps_init()
      69  dma_fence_put(deps->fences[i]);   in i915_deps_fini()
      71  if (deps->fences != &deps->single)   in i915_deps_fini()
      72  kfree(deps->fences);   in i915_deps_fini()
      89  memcpy(new_fences, deps->fences,   in i915_deps_grow()
      91  swap(new_fences, deps->fences);   in i915_deps_grow()
      96  deps->fences[deps->num_deps++] = dma_fence_get(fence);   in i915_deps_grow()
      [all …]

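The i915_deps hits above trace a small-vector idiom: dependencies start in a single embedded slot and move to a kmalloc'ed array only once a second fence arrives. A minimal sketch of that idiom follows; struct fence_deps and fence_deps_add are illustrative names, not the driver's actual definitions.

```c
#include <linux/dma-fence.h>
#include <linux/minmax.h>
#include <linux/slab.h>
#include <linux/string.h>

struct fence_deps {
	struct dma_fence *single;   /* embedded slot for the common 1-fence case */
	struct dma_fence **fences;  /* &single, or a heap array once grown */
	unsigned int num_deps;
	unsigned int capacity;      /* starts at 1 with fences == &single */
};

static int fence_deps_add(struct fence_deps *deps, struct dma_fence *fence)
{
	if (deps->num_deps >= deps->capacity) {
		unsigned int new_cap = deps->capacity * 2;
		struct dma_fence **new_fences;

		new_fences = kmalloc_array(new_cap, sizeof(*new_fences),
					   GFP_KERNEL);
		if (!new_fences)
			return -ENOMEM;

		/* Copy over, then free the old array unless it was embedded. */
		memcpy(new_fences, deps->fences,
		       deps->num_deps * sizeof(*new_fences));
		swap(new_fences, deps->fences);
		if (new_fences != &deps->single)
			kfree(new_fences);
		deps->capacity = new_cap;
	}

	deps->fences[deps->num_deps++] = dma_fence_get(fence);
	return 0;
}
```
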
/Linux-v6.1/drivers/dma-buf/

dma-resv.c
     137  RCU_INIT_POINTER(obj->fences, NULL);   in dma_resv_init()
     151  dma_resv_list_free(rcu_dereference_protected(obj->fences, true));   in dma_resv_fini()
     159  return rcu_dereference_check(obj->fences, dma_resv_held(obj));   in dma_resv_fences_list()
     223  rcu_assign_pointer(obj->fences, new);   in dma_resv_reserve_fences()
     253  struct dma_resv_list *fences = dma_resv_fences_list(obj);   in dma_resv_reset_max_fences() (local)
     258  if (fences)   in dma_resv_reset_max_fences()
     259  fences->max_fences = fences->num_fences;   in dma_resv_reset_max_fences()
     358  cursor->fences = dma_resv_fences_list(cursor->obj);   in dma_resv_iter_restart_unlocked()
     359  if (cursor->fences)   in dma_resv_iter_restart_unlocked()
     360  cursor->num_fences = cursor->fences->num_fences;   in dma_resv_iter_restart_unlocked()
     [all …]

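dma-resv.c implements the RCU-protected fence list attached to every DMA buffer. The usual calling pattern against the v6.1 API, reserving a slot under the lock before adding and iterating unlocked through the cursor, looks roughly like this; publish_fence and dump_fences are made-up names:

```c
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

static int publish_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	int ret;

	ret = dma_resv_lock(obj, NULL);
	if (ret)
		return ret;

	/* A slot must be reserved before dma_resv_add_fence() is legal. */
	ret = dma_resv_reserve_fences(obj, 1);
	if (!ret)
		dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);

	dma_resv_unlock(obj);
	return ret;
}

static void dump_fences(struct dma_resv *obj)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	/* Unlocked walk: the cursor restarts if the list changes under us. */
	dma_resv_iter_begin(&cursor, obj, DMA_RESV_USAGE_READ);
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		pr_info("fence ctx %llu seqno %llu\n",
			fence->context, fence->seqno);
	dma_resv_iter_end(&cursor);
}
```
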
dma-fence-unwrap.c
      64  struct dma_fence **fences,   in __dma_fence_unwrap_merge() (argument)
      74  dma_fence_unwrap_for_each(tmp, &iter[i], fences[i])   in __dma_fence_unwrap_merge()
      93  fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);   in __dma_fence_unwrap_merge()
     104  while (fences[i] && dma_fence_is_signaled(fences[i]))   in __dma_fence_unwrap_merge()
     105  fences[i] = dma_fence_unwrap_next(&iter[i]);   in __dma_fence_unwrap_merge()
     107  next = fences[i];   in __dma_fence_unwrap_merge()
     126  fences[i] = dma_fence_unwrap_next(&iter[i]);   in __dma_fence_unwrap_merge()
     129  fences[sel] = dma_fence_unwrap_next(&iter[sel]);   in __dma_fence_unwrap_merge()
     136  fences[sel] = dma_fence_unwrap_next(&iter[sel]);   in __dma_fence_unwrap_merge()

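dma_fence_unwrap flattens fence containers (arrays and chains) into their leaf fences. A short sketch of walking a possibly nested fence with the v6.1 iterator macro; count_leaf_fences is a hypothetical helper:

```c
#include <linux/dma-fence-unwrap.h>

static unsigned int count_leaf_fences(struct dma_fence *head)
{
	struct dma_fence_unwrap iter;
	struct dma_fence *f;
	unsigned int count = 0;

	/* Visits every leaf inside arrays/chains, including signaled ones. */
	dma_fence_unwrap_for_each(f, &iter, head)
		count++;

	return count;
}
```
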
dma-fence-array.c
      87  if (dma_fence_add_callback(array->fences[i], &cb[i].cb,   in dma_fence_array_enable_signaling()
      89  int error = array->fences[i]->error;   in dma_fence_array_enable_signaling()
     120  dma_fence_put(array->fences[i]);   in dma_fence_array_release()
     122  kfree(array->fences);   in dma_fence_array_release()
     155  struct dma_fence **fences,   in dma_fence_array_create() (argument)
     162  WARN_ON(!num_fences || !fences);   in dma_fence_array_create()
     177  array->fences = fences;   in dma_fence_array_create()
     193  WARN_ON(dma_fence_is_container(fences[num_fences]));   in dma_fence_array_create()
     217  if (array->fences[i]->context != context)   in dma_fence_match_context()
     239  return array->fences[0];   in dma_fence_array_first()
     [all …]

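dma_fence_array_create() turns a caller-allocated fence array into a single fence; on the final put it drops each reference and kfree()s the storage, as the release hits at lines 120 and 122 show. A hedged sketch of typical use, where bundle_fences is an illustrative wrapper:

```c
#include <linux/dma-fence-array.h>

/*
 * Collapse a caller-built fence list into one fence that signals when
 * all of them have signaled. On success the array takes ownership of
 * both the 'fences' storage and the references held in it.
 */
static struct dma_fence *bundle_fences(struct dma_fence **fences,
				       unsigned int count)
{
	struct dma_fence_array *array;

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1), 1,
				       false /* signal when all signal */);
	if (!array)
		return NULL;

	return &array->base;
}
```
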
st-dma-fence-chain.c
     102  struct dma_fence **fences;   (member)
     124  fc->fences = kvmalloc_array(count, sizeof(*fc->fences),   in fence_chains_init()
     126  if (!fc->fences) {   in fence_chains_init()
     133  fc->fences[i] = mock_fence();   in fence_chains_init()
     134  if (!fc->fences[i]) {   in fence_chains_init()
     140  fc->fences[i],   in fence_chains_init()
     157  dma_fence_put(fc->fences[i]);   in fence_chains_init()
     160  kvfree(fc->fences);   in fence_chains_init()
     171  dma_fence_signal(fc->fences[i]);   in fence_chains_fini()
     172  dma_fence_put(fc->fences[i]);   in fence_chains_fini()
     [all …]

st-dma-fence-unwrap.c
      49  struct dma_fence **fences;   in mock_array() (local)
      53  fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);   in mock_array()
      54  if (!fences)   in mock_array()
      59  fences[i] = va_arg(valist, typeof(*fences));   in mock_array()
      62  array = dma_fence_array_create(num_fences, fences,   in mock_array()
      70  kfree(fences);   in mock_array()
      75  dma_fence_put(va_arg(valist, typeof(*fences)));   in mock_array()

st-dma-resv.c
     228  cursor.fences = (void*)~0;   in test_for_each_unlocked()
     247  struct dma_fence *f, **fences = NULL;   in test_get_fences() (local)
     274  r = dma_resv_get_fences(&resv, usage, &i, &fences);   in test_get_fences()
     280  if (i != 1 || fences[0] != f) {   in test_get_fences()
     288  dma_fence_put(fences[i]);   in test_get_fences()
     289  kfree(fences);   in test_get_fences()

dma-fence.c
     810  dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,   in dma_fence_test_signaled_any() (argument)
     816  struct dma_fence *fence = fences[i];   in dma_fence_test_signaled_any()
     847  dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,   in dma_fence_wait_any_timeout() (argument)
     854  if (WARN_ON(!fences || !count || timeout < 0))   in dma_fence_wait_any_timeout()
     859  if (dma_fence_is_signaled(fences[i])) {   in dma_fence_wait_any_timeout()
     875  struct dma_fence *fence = fences[i];   in dma_fence_wait_any_timeout()
     893  if (dma_fence_test_signaled_any(fences, count, idx))   in dma_fence_wait_any_timeout()
     906  dma_fence_remove_callback(fences[i], &cb[i].base);   in dma_fence_wait_any_timeout()

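dma_fence_wait_any_timeout() returns the remaining jiffies on success, 0 on timeout, or a negative error, and optionally reports which fence signaled first. A small usage sketch; wait_for_any is an illustrative name:

```c
#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Wait up to one second for the first of 'count' fences to signal. */
static long wait_for_any(struct dma_fence **fences, uint32_t count)
{
	uint32_t idx;
	signed long t;

	t = dma_fence_wait_any_timeout(fences, count,
				       true /* interruptible */,
				       msecs_to_jiffies(1000), &idx);
	if (t < 0)
		return t;		/* e.g. -ERESTARTSYS */
	if (t == 0)
		return -ETIMEDOUT;

	pr_info("fence %u signaled first\n", idx);
	return 0;
}
```
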
st-dma-fence.c
     446  struct dma_fence __rcu **fences;   (member)
     477  rcu_assign_pointer(t->fences[t->id], f1);   in thread_signal_callback()
     482  f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]);   in thread_signal_callback()
     514  rcu_assign_pointer(t->fences[t->id], NULL);   in thread_signal_callback()
     538  t[i].fences = f;   in race_signal_callback()

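This selftest races rcu_assign_pointer() publication against dma_fence_get_rcu_safe(), which retries internally if the fence it read is concurrently replaced or freed. A sketch of the same publish/consume pairing; struct fence_slot is illustrative:

```c
#include <linux/dma-fence.h>
#include <linux/rcupdate.h>

struct fence_slot {
	struct dma_fence __rcu *fence;
};

/* Writer side: publish (or clear) the current fence. */
static void slot_publish(struct fence_slot *slot, struct dma_fence *f)
{
	rcu_assign_pointer(slot->fence, f);
}

/* Reader side: take a reference safely even mid-swap. */
static struct dma_fence *slot_get(struct fence_slot *slot)
{
	return dma_fence_get_rcu_safe(&slot->fence);
}
```
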
/Linux-v6.1/Documentation/driver-api/

sync_file.rst
       9  the fences(struct dma_fence) that are needed to synchronize between drivers or
      29  in-fences and out-fences
      33  the driver to userspace we call the fences it contains 'out-fences'. They are
      37  Out-fences are fences that the driver creates.
      40  userspace we call these fence(s) 'in-fences'. Receiving in-fences means that
      42  the in-fences.
      72  of the Sync File to the kernel. The kernel can then retrieve the fences
      [all …]

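A sketch of the two directions the document describes, against the in-tree sync_file API: wrapping a driver fence as an out-fence fd, and resolving an in-fence fd back to a dma_fence. Error handling is minimal, and export_out_fence/import_in_fence are made-up names:

```c
#include <linux/dma-fence.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/sync_file.h>

/* Out-fence: wrap the driver's fence in a sync_file and hand the fd out. */
static int export_out_fence(struct dma_fence *fence, int *out_fd)
{
	struct sync_file *sync = sync_file_create(fence);
	int fd;

	if (!sync)
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		fput(sync->file);	/* releases the sync_file */
		return fd;
	}

	fd_install(fd, sync->file);
	*out_fd = fd;
	return 0;
}

/* In-fence: returns a referenced fence, or NULL if fd is not a sync_file. */
static struct dma_fence *import_in_fence(int fd)
{
	return sync_file_get_fence(fd);
}
```
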
dma-buf.rst
     147  :doc: DMA fences overview
     209  * Future fences, used in HWC1 to signal when a buffer isn't used by the display
     213  * Proxy fences, proposed to handle &drm_syncobj for which the fence has not yet
     216  * Userspace fences or gpu futexes, fine-grained locking within a command buffer
     222  batch DMA fences for memory management instead of context preemption DMA
     223  fences which get reattached when the compute job is rescheduled.
     226  fences and controls when they fire. Mixing indefinite fences with normal
     227  in-kernel DMA fences does not work, even when a fallback timeout is included to
     233  * Only userspace knows about all dependencies in indefinite fences and when
     237  for memory management needs, which means we must support indefinite fences being
     [all …]

/Linux-v6.1/drivers/gpu/drm/amd/amdgpu/

amdgpu_sync.c
      54  hash_init(sync->fences);   in amdgpu_sync_create()
     137  hash_for_each_possible(sync->fences, e, node, f->context) {   in amdgpu_sync_add_later()
     169  hash_add(sync->fences, &e->node, f->context);   in amdgpu_sync_fence()
     278  hash_for_each_safe(sync->fences, i, tmp, e, node) {   in amdgpu_sync_peek_fence()
     320  hash_for_each_safe(sync->fences, i, tmp, e, node) {   in amdgpu_sync_get_fence()
     351  hash_for_each_safe(source->fences, i, tmp, e, node) {   in amdgpu_sync_clone()
     373  hash_for_each_safe(sync->fences, i, tmp, e, node) {   in amdgpu_sync_wait()
     399  hash_for_each_safe(sync->fences, i, tmp, e, node) {   in amdgpu_sync_free()

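amdgpu_sync deduplicates dependencies by dma_fence context in a fixed-size hashtable. A minimal sketch of the same container idiom with <linux/hashtable.h>; struct sync_set and sync_entry are illustrative, not amdgpu's types:

```c
#include <linux/dma-fence.h>
#include <linux/hashtable.h>

struct sync_entry {
	struct hlist_node node;
	struct dma_fence *fence;
};

struct sync_set {
	/* 16 buckets, keyed by fence context; hash_init() before first use. */
	DECLARE_HASHTABLE(fences, 4);
};

static struct sync_entry *sync_find(struct sync_set *sync, u64 context)
{
	struct sync_entry *e;

	/* Walks only the bucket the key hashes to; contexts may collide. */
	hash_for_each_possible(sync->fences, e, node, context)
		if (e->fence->context == context)
			return e;

	return NULL;
}
```
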
amdgpu_sa.c
     207  struct dma_fence **fences,   in amdgpu_sa_bo_next_hole() (argument)
     229  fences[i] = NULL;   in amdgpu_sa_bo_next_hole()
     238  fences[i] = sa_bo->fence;   in amdgpu_sa_bo_next_hole()
     279  struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];   in amdgpu_sa_bo_new() (local)
     314  } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));   in amdgpu_sa_bo_new()
     317  if (fences[i])   in amdgpu_sa_bo_new()
     318  fences[count++] = dma_fence_get(fences[i]);   in amdgpu_sa_bo_new()
     322  t = dma_fence_wait_any_timeout(fences, count, false,   in amdgpu_sa_bo_new()
     326  dma_fence_put(fences[i]);   in amdgpu_sa_bo_new()

amdgpu_ids.c
     187  struct dma_fence **fences;   in amdgpu_vmid_grab_idle() (local)
     194  fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);   in amdgpu_vmid_grab_idle()
     195  if (!fences)   in amdgpu_vmid_grab_idle()
     205  fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);   in amdgpu_vmid_grab_idle()
     206  if (!fences[i])   in amdgpu_vmid_grab_idle()
     220  dma_fence_get(fences[j]);   in amdgpu_vmid_grab_idle()
     222  array = dma_fence_array_create(i, fences, fence_context,   in amdgpu_vmid_grab_idle()
     226  dma_fence_put(fences[j]);   in amdgpu_vmid_grab_idle()
     227  kfree(fences);   in amdgpu_vmid_grab_idle()
     236  kfree(fences);   in amdgpu_vmid_grab_idle()
     [all …]

amdgpu_fence.c
     186  ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];   in amdgpu_fence_emit()
     298  ptr = &drv->fences[last_seq];   in amdgpu_fence_process()
     349  ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];   in amdgpu_fence_wait_empty()
     478  ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),   in amdgpu_fence_driver_init_ring()
     481  if (!ring->fence_drv.fences)   in amdgpu_fence_driver_init_ring()
     571  dma_fence_put(ring->fence_drv.fences[j]);   in amdgpu_fence_driver_sw_fini()
     572  kfree(ring->fence_drv.fences);   in amdgpu_fence_driver_sw_fini()
     573  ring->fence_drv.fences = NULL;   in amdgpu_fence_driver_sw_fini()
     618  ptr = &ring->fence_drv.fences[i];   in amdgpu_fence_driver_clear_job_fences()

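The hits above show amdgpu keeping in-flight fences in a power-of-two array (twice the HW submission depth) and mapping a 64-bit seqno to a slot by masking, so no division is needed. A sketch of just that indexing, with illustrative names rather than amdgpu's actual layout:

```c
#include <linux/dma-fence.h>

struct fence_ring {
	struct dma_fence **fences;	/* kcalloc'ed, power-of-two length */
	u64 num_fences_mask;		/* length - 1 */
};

static struct dma_fence **slot_for_seq(struct fence_ring *ring, u64 seq)
{
	/* seq & mask == seq % length when length is a power of two */
	return &ring->fences[seq & ring->num_fences_mask];
}
```
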
amdgpu_jpeg.c
      78  unsigned int fences = 0;   in amdgpu_jpeg_idle_work_handler() (local)
      85  fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec);   in amdgpu_jpeg_idle_work_handler()
      88  if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt))   in amdgpu_jpeg_idle_work_handler()

amdgpu_ctx.c
     195  res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i]));   in amdgpu_ctx_entity_time()
     212  entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),   in amdgpu_ctx_init_entity()
     267  res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i]));   in amdgpu_ctx_fini_entity()
     268  dma_fence_put(entity->fences[i]);   in amdgpu_ctx_fini_entity()
     729  other = centity->fences[idx];   in amdgpu_ctx_add_fence()
     735  centity->fences[idx] = fence;   in amdgpu_ctx_add_fence()
     769  fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);   in amdgpu_ctx_get_fence()
     831  other = dma_fence_get(centity->fences[idx]);   in amdgpu_ctx_wait_prev_fence()

amdgpu_cs.c
    1592  struct drm_amdgpu_fence *fences)   in amdgpu_cs_wait_all_fences() (argument)
    1602  fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);   in amdgpu_cs_wait_all_fences()
    1637  struct drm_amdgpu_fence *fences)   in amdgpu_cs_wait_any_fence() (argument)
    1655  fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);   in amdgpu_cs_wait_any_fence()
    1705  struct drm_amdgpu_fence *fences;   in amdgpu_cs_wait_fences_ioctl() (local)
    1709  fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),   in amdgpu_cs_wait_fences_ioctl()
    1711  if (fences == NULL)   in amdgpu_cs_wait_fences_ioctl()
    1714  fences_user = u64_to_user_ptr(wait->in.fences);   in amdgpu_cs_wait_fences_ioctl()
    1715  if (copy_from_user(fences, fences_user,   in amdgpu_cs_wait_fences_ioctl()
    1722  r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);   in amdgpu_cs_wait_fences_ioctl()
    [all …]

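The wait_fences ioctl pulls a userspace fence array into the kernel: the user pointer arrives as a u64 and is bounced through u64_to_user_ptr() before copy_from_user(). An illustrative sketch of that step in isolation (copy_fence_array is a made-up helper, not amdgpu code):

```c
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static void *copy_fence_array(u64 user_addr, u32 count, size_t elem_size)
{
	void __user *uptr = u64_to_user_ptr(user_addr);
	void *fences;

	/* kmalloc_array() fails cleanly if count * elem_size overflows. */
	fences = kmalloc_array(count, elem_size, GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(fences, uptr, count * elem_size)) {
		kfree(fences);
		return ERR_PTR(-EFAULT);
	}

	return fences;
}
```
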
/Linux-v6.1/drivers/gpu/drm/i915/selftests/

i915_sw_fence.c
     453  struct i915_sw_fence **fences;   in test_chain() (local)
     457  fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL);   in test_chain()
     458  if (!fences)   in test_chain()
     462  fences[i] = alloc_fence();   in test_chain()
     463  if (!fences[i]) {   in test_chain()
     470  ret = i915_sw_fence_await_sw_fence_gfp(fences[i],   in test_chain()
     471  fences[i - 1],   in test_chain()
     478  i915_sw_fence_commit(fences[i]);   in test_chain()
     484  if (i915_sw_fence_done(fences[i])) {   in test_chain()
     490  i915_sw_fence_commit(fences[0]);   in test_chain()
     [all …]

/Linux-v6.1/drivers/gpu/drm/radeon/

radeon_sa.c
     248  struct radeon_fence **fences,   in radeon_sa_bo_next_hole() (argument)
     270  fences[i] = NULL;   in radeon_sa_bo_next_hole()
     280  fences[i] = sa_bo->fence;   in radeon_sa_bo_next_hole()
     319  struct radeon_fence *fences[RADEON_NUM_RINGS];   in radeon_sa_bo_new() (local)
     350  } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));   in radeon_sa_bo_new()
     353  radeon_fence_ref(fences[i]);   in radeon_sa_bo_new()
     356  r = radeon_fence_wait_any(rdev, fences, false);   in radeon_sa_bo_new()
     358  radeon_fence_unref(&fences[i]);   in radeon_sa_bo_new()

radeon_trace.h
      36  __field(u32, fences)
      42  __entry->fences = radeon_fence_count_emitted(
      47  __entry->fences)

/Linux-v6.1/drivers/gpu/drm/i915/gem/

i915_gem_execbuffer.c
     311  struct eb_fence *fences;   (member)
    2732  __free_fence_array(struct eb_fence *fences, unsigned int n)   in __free_fence_array() (argument)
    2735  drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2));   in __free_fence_array()
    2736  dma_fence_put(fences[n].dma_fence);   in __free_fence_array()
    2737  dma_fence_chain_free(fences[n].chain_fence);   in __free_fence_array()
    2739  kvfree(fences);   in __free_fence_array()
    2771  f = krealloc(eb->fences,   in add_timeline_fence_array()
    2777  eb->fences = f;   in add_timeline_fence_array()
    2895  f = krealloc(eb->fences,   in add_fence_array()
    2901  eb->fences = f;   in add_fence_array()
    [all …]

/Linux-v6.1/include/linux/

dma-fence-array.h
      43  struct dma_fence **fences;   (member)
      80  struct dma_fence **fences,

dma-resv.h
     178  struct dma_resv_list __rcu *fences;   (member)
     210  struct dma_resv_list *fences;   (member)
     476  unsigned int *num_fences, struct dma_fence ***fences);

/Linux-v6.1/drivers/gpu/drm/virtio/

virtgpu_fence.c
     111  list_add_tail(&fence->node, &drv->fences);   in virtio_gpu_fence_emit()
     136  list_for_each_entry_safe(curr, tmp, &drv->fences, node) {   in virtio_gpu_fence_event_process()
     146  list_for_each_entry_safe(curr, tmp, &drv->fences, node) {   in virtio_gpu_fence_event_process()

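virtio-gpu appends every emitted fence to drv->fences and, when the device signals progress, walks the list with the _safe iterator so completed entries can be unlinked mid-walk. Roughly, under made-up names (struct pending_fence and reap_signaled are not the driver's types):

```c
#include <linux/dma-fence.h>
#include <linux/list.h>

struct pending_fence {
	struct dma_fence base;
	struct list_head node;
};

/* Signal and drop every pending fence up to the last seqno the HW reported. */
static void reap_signaled(struct list_head *pending, u64 last_seq)
{
	struct pending_fence *curr, *tmp;

	list_for_each_entry_safe(curr, tmp, pending, node) {
		if (curr->base.seqno > last_seq)
			continue;
		dma_fence_signal(&curr->base);
		list_del(&curr->node);
		dma_fence_put(&curr->base);	/* drop the list's reference */
	}
}
```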