/Linux-v6.6/include/drm/

  gpu_scheduler.h
      88  struct drm_sched_entity {  [struct]
     260  struct drm_sched_entity *current_entity;  [argument]
     354  struct drm_sched_entity *entity;
     406  struct drm_sched_entity *s_entity);
     529  struct drm_sched_entity *entity,
     546  void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
     559  struct drm_sched_entity *entity);
     563  struct drm_sched_entity *entity);
     565  struct drm_sched_entity *entity);
     567  void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts);
     [all …]
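The header hits above cover the struct drm_sched_entity definition and the entity-facing declarations. For orientation, a minimal sketch of how a driver context might embed and initialize an entity against a single scheduler, assuming the v6.6 prototypes from include/drm/gpu_scheduler.h; my_ctx, my_ctx_init and my_ctx_fini are hypothetical names, not taken from the listing:

/*
 * Sketch only: a hypothetical driver context wiring one drm_sched_entity to
 * a single drm_gpu_scheduler, using the v6.6 prototypes from this header.
 */
#include <linux/kernel.h>
#include <drm/gpu_scheduler.h>

struct my_ctx {
        struct drm_sched_entity entity;         /* one entity per context/queue */
};

static int my_ctx_init(struct my_ctx *ctx, struct drm_gpu_scheduler *sched)
{
        struct drm_gpu_scheduler *sched_list[] = { sched };

        /* Bind the entity to its scheduler(s) at normal priority. */
        return drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
                                     sched_list, ARRAY_SIZE(sched_list),
                                     NULL /* no guilty counter */);
}

static void my_ctx_fini(struct my_ctx *ctx)
{
        /* Flushes outstanding jobs, then tears the entity down. */
        drm_sched_entity_destroy(&ctx->entity);
}

drm_sched_entity_destroy() combines drm_sched_entity_flush() and drm_sched_entity_fini(), which the sched_entity.c hits below list separately.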
/Linux-v6.6/drivers/gpu/drm/scheduler/

  sched_entity.c
      59  int drm_sched_entity_init(struct drm_sched_entity *entity,  [in drm_sched_entity_init()]
      68  memset(entity, 0, sizeof(struct drm_sched_entity));  [in drm_sched_entity_init()]
     108  void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,  [in drm_sched_entity_modify_sched()]
     119  static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)  [in drm_sched_entity_is_idle()]
     132  bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)  [in drm_sched_entity_is_ready()]
     150  int drm_sched_entity_error(struct drm_sched_entity *entity)  [in drm_sched_entity_error()]
     215  static void drm_sched_entity_kill(struct drm_sched_entity *entity)  [in drm_sched_entity_kill()]
     259  long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)  [in drm_sched_entity_flush()]
     305  void drm_sched_entity_fini(struct drm_sched_entity *entity)  [in drm_sched_entity_fini()]
     332  void drm_sched_entity_destroy(struct drm_sched_entity *entity)  [in drm_sched_entity_destroy()]
     [all …]
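The lifecycle helpers listed above (init, flush, fini, destroy) pair with job submission through the entity. A hedged sketch of the usual submit path against an already-initialized entity, assuming the v6.6 drm_sched_job API; struct my_job and my_push_job are illustrative names only:

/*
 * Sketch: typical submit flow through a drm_sched_entity. The embedding
 * struct my_job is hypothetical; the drm_sched_* calls follow v6.6.
 */
#include <drm/gpu_scheduler.h>

struct my_job {
        struct drm_sched_job base;
        /* driver payload (command stream, BOs, ...) */
};

static int my_push_job(struct my_job *job, struct drm_sched_entity *entity,
                       void *owner)
{
        int ret;

        /* Attach the job to the entity and allocate its scheduler fence. */
        ret = drm_sched_job_init(&job->base, entity, owner);
        if (ret)
                return ret;

        /* Arm binds the job to the entity's selected scheduler and
         * finalizes the scheduled/finished fences. */
        drm_sched_job_arm(&job->base);

        /* Queue the job; the scheduler thread later runs the driver's
         * run_job() callback and signals the fences. */
        drm_sched_entity_push_job(&job->base);
        return 0;
}

Real drivers add dependency tracking (drm_sched_job_add_dependency() and friends) between init and arm; the sketch leaves that out.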
  sched_main.c
      82  struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);  [in drm_sched_entity_compare_before()]
      83  struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);  [in drm_sched_entity_compare_before()]
      88  static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)  [in drm_sched_rq_remove_fifo_locked()]
      98  void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)  [in drm_sched_rq_update_fifo()]
     146  struct drm_sched_entity *entity)  [in drm_sched_rq_add_entity()]
     168  struct drm_sched_entity *entity)  [in drm_sched_rq_remove_entity()]
     194  static struct drm_sched_entity *
     197  struct drm_sched_entity *entity;  [in drm_sched_rq_select_entity_rr()]
     238  static struct drm_sched_entity *
     245  struct drm_sched_entity *entity;  [in drm_sched_rq_select_entity_fifo()]
     [all …]
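sched_main.c keeps entities on each run queue either in round-robin order or, for FIFO selection, in an rb-tree ordered by the queueing time of each entity's oldest pending job (the rb_tree_node hits at lines 82-83 above). An illustrative reconstruction of that ordering predicate; the field name oldest_job_waiting is taken from the v6.6 sources and worth double-checking there:

/*
 * Illustrative reconstruction of the FIFO ordering behind
 * drm_sched_rq_select_entity_fifo(): the entity whose oldest pending job
 * was queued earliest sorts first in the run queue's rb-tree.
 */
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>

static bool entity_queued_before(struct rb_node *a, const struct rb_node *b)
{
        struct drm_sched_entity *ent_a =
                rb_entry(a, struct drm_sched_entity, rb_tree_node);
        struct drm_sched_entity *ent_b =
                rb_entry(b, struct drm_sched_entity, rb_tree_node);

        return ktime_before(ent_a->oldest_job_waiting,
                            ent_b->oldest_job_waiting);
}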
  gpu_scheduler_trace.h
      36  TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
      39  __field(struct drm_sched_entity *, entity)
      63  TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
      68  TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),

  sched_fence.c
     208  struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,  [in drm_sched_fence_alloc()]
     224  struct drm_sched_entity *entity)  [in drm_sched_fence_init()]
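The two functions above allocate and initialize the drm_sched_fence attached to every job pushed through an entity; its finished dma_fence is what drivers usually hand back to userspace or wait on. A minimal sketch using the v6.6 field names (my_job_finished_fence is a hypothetical helper):

/*
 * Sketch: after drm_sched_job_arm(), job->s_fence->finished is the dma_fence
 * that signals once the job has completed on the hardware.
 */
#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

static struct dma_fence *my_job_finished_fence(struct drm_sched_job *job)
{
        return dma_fence_get(&job->s_fence->finished);
}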
/Linux-v6.6/drivers/gpu/drm/amd/amdgpu/

  amdgpu_ctx.h
      41  struct drm_sched_entity entity;
      77  u32 ring, struct drm_sched_entity **entity);
      79  struct drm_sched_entity *entity,
      82  struct drm_sched_entity *entity,
      91  struct drm_sched_entity *entity);

  amdgpu_job.h
      89  struct drm_sched_entity *entity, void *owner,
      92  struct drm_sched_entity *entity, void *owner,

  amdgpu_job.c
      94  struct drm_sched_entity *entity, void *owner,  [in amdgpu_job_alloc()]
     122  struct drm_sched_entity *entity, void *owner,  [in amdgpu_job_alloc_with_ib()]
     254  struct drm_sched_entity *s_entity)  [in amdgpu_job_prepare_job()]
     324  struct drm_sched_entity *s_entity = NULL;  [in amdgpu_job_stop_all_jobs_on_sched()]

  amdgpu_ctx.c
     434  u32 ring, struct drm_sched_entity **entity)  [in amdgpu_ctx_get_entity()]
     437  struct drm_sched_entity *ctx_entity;  [in amdgpu_ctx_get_entity()]
     749  struct drm_sched_entity *entity,  [in amdgpu_ctx_add_fence()]
     776  struct drm_sched_entity *entity,  [in amdgpu_ctx_get_fence()]
     851  struct drm_sched_entity *entity)  [in amdgpu_ctx_wait_prev_fence()]
     899  struct drm_sched_entity *entity;  [in amdgpu_ctx_mgr_entity_flush()]
     929  struct drm_sched_entity *entity;  [in amdgpu_ctx_mgr_entity_fini()]

  amdgpu_ttm.h
      67  struct drm_sched_entity high_pr;
      69  struct drm_sched_entity low_pr;

  amdgpu_cs.h
      61  struct drm_sched_entity *entities[AMDGPU_CS_GANG_SIZE];

  amdgpu_vce.h
      51  struct drm_sched_entity entity;

  amdgpu_uvd.h
      65  struct drm_sched_entity entity;

  amdgpu_vm.h
     299  struct drm_sched_entity immediate;
     300  struct drm_sched_entity delayed;

  amdgpu_vm_sdma.c
      56  struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate  [in amdgpu_vm_sdma_alloc_job()]

  amdgpu_cs.c
      76  struct drm_sched_entity *entity;  [in amdgpu_cs_job_idx()]
     394  struct drm_sched_entity *entity;  [in amdgpu_cs_p2_dependencies()]
    1461  struct drm_sched_entity *entity;  [in amdgpu_cs_wait_ioctl()]
    1509  struct drm_sched_entity *entity;  [in amdgpu_cs_get_fence()]
/Linux-v6.6/drivers/gpu/drm/msm/

  msm_submitqueue.c
     121  static struct drm_sched_entity *
     137  struct drm_sched_entity *entity;  [in get_sched_entity()]

  msm_gpu.h
     430  struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
     516  struct drm_sched_entity *entity;
/Linux-v6.6/drivers/gpu/drm/lima/

  lima_sched.h
      37  struct drm_sched_entity base;
/Linux-v6.6/drivers/gpu/drm/nouveau/

  nouveau_sched.h
     102  struct drm_sched_entity base;
/Linux-v6.6/drivers/gpu/drm/etnaviv/

  etnaviv_drv.h
      34  struct drm_sched_entity sched_entity[ETNA_MAX_PIPES];
/Linux-v6.6/drivers/gpu/drm/panfrost/

  panfrost_device.h
     141  struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];

  panfrost_job.c
     901  struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i];  [in panfrost_job_close()]
/Linux-v6.6/drivers/gpu/drm/v3d/

  v3d_drv.h
     169  struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
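A pattern that recurs in the driver hits above (msm, etnaviv, panfrost, v3d) is an array of entities, one per queue or priority level, created per open file against the matching scheduler instances. A hedged sketch of that pattern; NUM_MY_QUEUES, my_file and my_file_open are invented names:

/*
 * Sketch: one drm_sched_entity per queue, as several of the drivers listed
 * above keep in their per-file state. Names here are hypothetical.
 */
#include <drm/gpu_scheduler.h>

#define NUM_MY_QUEUES 3                         /* hypothetical queue count */

struct my_file {
        struct drm_sched_entity sched_entity[NUM_MY_QUEUES];
};

static int my_file_open(struct my_file *f, struct drm_gpu_scheduler **scheds)
{
        int i, ret;

        for (i = 0; i < NUM_MY_QUEUES; i++) {
                /* One scheduler per queue; entity i binds to scheds[i]. */
                ret = drm_sched_entity_init(&f->sched_entity[i],
                                            DRM_SCHED_PRIORITY_NORMAL,
                                            &scheds[i], 1, NULL);
                if (ret)
                        goto err_fini;
        }
        return 0;

err_fini:
        while (--i >= 0)
                drm_sched_entity_destroy(&f->sched_entity[i]);
        return ret;
}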
/Linux-v6.6/Documentation/gpu/rfc/

  xe.rst
      85  drm_sched_entity.
     175  drm_sched_entity) and making sure drm_scheduler can cope with the lack of job