/Linux-v5.15/drivers/gpu/host1x/job.c

    3  * Tegra host1x Job
   21  #include "job.h"
   30  struct host1x_job *job = NULL;   in host1x_job_alloc() local
   51  mem = job = kzalloc(total, GFP_KERNEL);   in host1x_job_alloc()
   52  if (!job)   in host1x_job_alloc()
   55  job->enable_firewall = enable_firewall;   in host1x_job_alloc()
   57  kref_init(&job->ref);   in host1x_job_alloc()
   58  job->channel = ch;   in host1x_job_alloc()
   62  job->relocs = num_relocs ? mem : NULL;   in host1x_job_alloc()
   64  job->unpins = num_unpins ? mem : NULL;   in host1x_job_alloc()
  [all …]
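The job.c hits above show host1x's single-allocation pattern: one kzalloc() sized for the job plus its trailing arrays, with job->relocs and job->unpins carved out of the same block. Below is a minimal, hedged sketch of that pattern; the demo_* names and field layout are invented for illustration and are not the real struct host1x_job.

#include <linux/slab.h>
#include <linux/kref.h>
#include <linux/types.h>

struct demo_reloc { u64 target; };
struct demo_unpin { void *map; };

struct demo_job {
        struct kref ref;
        struct demo_reloc *relocs;
        struct demo_unpin *unpins;
};

static struct demo_job *demo_job_alloc(u32 num_relocs, u32 num_unpins)
{
        struct demo_job *job;
        size_t total;
        void *mem;

        /* One allocation covers the struct and both trailing arrays. */
        total = sizeof(*job) +
                num_relocs * sizeof(struct demo_reloc) +
                num_unpins * sizeof(struct demo_unpin);

        mem = job = kzalloc(total, GFP_KERNEL);
        if (!job)
                return NULL;

        kref_init(&job->ref);

        /* Carve the arrays out of the same block, directly after the struct. */
        mem += sizeof(*job);
        job->relocs = num_relocs ? mem : NULL;
        mem += num_relocs * sizeof(struct demo_reloc);
        job->unpins = num_unpins ? mem : NULL;

        return job;
}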
/Linux-v5.15/drivers/gpu/host1x/cdma.c

   23  #include "job.h"
  270  * Start timer that tracks the time spent by the job.
  274  struct host1x_job *job)   in cdma_start_timer_locked() argument
  281  cdma->timeout.client = job->client;   in cdma_start_timer_locked()
  282  cdma->timeout.syncpt = job->syncpt;   in cdma_start_timer_locked()
  283  cdma->timeout.syncpt_val = job->syncpt_end;   in cdma_start_timer_locked()
  287  msecs_to_jiffies(job->timeout));   in cdma_start_timer_locked()
  313  struct host1x_job *job, *n;   in update_cdma_locked() local
  319  list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {   in update_cdma_locked()
  320  struct host1x_syncpt *sp = job->syncpt;   in update_cdma_locked()
  [all …]
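The cdma.c hits record which syncpoint value a submitted job is expected to reach and arm a timeout scaled with msecs_to_jiffies(). As a rough analogue only (host1x's actual timeout machinery is not shown here), a plain kernel-timer version of the idea might look like this; all demo_* names are hypothetical.

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/types.h>

struct demo_ctx {
        struct timer_list watchdog;
        u32 expected_fence;
};

static void demo_watchdog_fn(struct timer_list *t)
{
        struct demo_ctx *ctx = from_timer(ctx, t, watchdog);

        pr_warn("job timed out waiting for fence %u\n", ctx->expected_fence);
}

static void demo_start_watchdog(struct demo_ctx *ctx, u32 fence, u32 timeout_ms)
{
        ctx->expected_fence = fence;
        /* Arm (or re-arm) the timer relative to now, like the cdma timeout. */
        mod_timer(&ctx->watchdog, jiffies + msecs_to_jiffies(timeout_ms));
}

/* During init, something like: timer_setup(&ctx->watchdog, demo_watchdog_fn, 0); */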
/Linux-v5.15/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

   36  struct amdgpu_job *job = to_amdgpu_job(s_job);   in amdgpu_job_timedout() local
   45  /* Effectively the job is aborted as the device is gone */   in amdgpu_job_timedout()
   52  amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {   in amdgpu_job_timedout()
   58  amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);   in amdgpu_job_timedout()
   60  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),   in amdgpu_job_timedout()
   66  amdgpu_device_gpu_recover(ring->adev, job);   in amdgpu_job_timedout()
   79  struct amdgpu_job **job, struct amdgpu_vm *vm)   in amdgpu_job_alloc() argument
   88  *job = kzalloc(size, GFP_KERNEL);   in amdgpu_job_alloc()
   89  if (!*job)   in amdgpu_job_alloc()
   96  (*job)->base.sched = &adev->rings[0]->sched;   in amdgpu_job_alloc()
  [all …]
/Linux-v5.15/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

  110  * @job: job to schedule
  127  struct amdgpu_ib *ibs, struct amdgpu_job *job,   in amdgpu_ib_schedule() argument
  148  /* ring tests don't use a job */   in amdgpu_ib_schedule()
  149  if (job) {   in amdgpu_ib_schedule()
  150  vm = job->vm;   in amdgpu_ib_schedule()
  151  fence_ctx = job->base.s_fence ?   in amdgpu_ib_schedule()
  152  job->base.s_fence->scheduled.context : 0;   in amdgpu_ib_schedule()
  163  if (vm && !job->vmid) {   in amdgpu_ib_schedule()
  184  if (ring->funcs->emit_pipeline_sync && job &&   in amdgpu_ib_schedule()
  185  ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||   in amdgpu_ib_schedule()
  [all …]
/Linux-v5.15/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h

   38  #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)   argument
   67  /* job_run_counter >= 1 means a resubmit job */
   72  struct amdgpu_job **job, struct amdgpu_vm *vm);
   74  enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
   75  void amdgpu_job_free_resources(struct amdgpu_job *job);
   76  void amdgpu_job_free(struct amdgpu_job *job);
   77  int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
   79  int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
/Linux-v5.15/drivers/md/dm-kcopyd.c

   40  MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
  347  * Error state of the job.
  367  * Set this to ensure you are notified when the job has
  374  * These fields are only used if the job has been split
  408  * Functions to push and pop a job onto the head of a given job
  414  struct kcopyd_job *job;   in pop_io_job() local
  420  list_for_each_entry(job, jobs, list) {   in pop_io_job()
  421  if (job->rw == READ || !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {   in pop_io_job()
  422  list_del(&job->list);   in pop_io_job()
  423  return job;   in pop_io_job()
  [all …]
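pop_io_job() above walks the pending list and detaches the first job that may be issued now. A reduced sketch of that list idiom follows, with a simplified job struct (not the real kcopyd_job, whose extra ordering checks are truncated in the listing).

#include <linux/list.h>
#include <linux/types.h>

struct demo_kjob {
        struct list_head list;
        bool is_read;
        bool write_seq;         /* must be issued in queue order */
};

static struct demo_kjob *demo_pop_io_job(struct list_head *jobs)
{
        struct demo_kjob *job;

        /*
         * Reads and unordered writes can be dispatched in any order;
         * sequential writes are skipped here (the real pop_io_job() applies
         * additional ordering checks that the excerpt above does not show).
         */
        list_for_each_entry(job, jobs, list) {
                if (job->is_read || !job->write_seq) {
                        list_del(&job->list);
                        return job;
                }
        }

        return NULL;
}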
/Linux-v5.15/drivers/gpu/drm/v3d/v3d_sched.c

   10  * scheduler will round-robin between clients to submit the next job.
   13  * jobs when bulk background jobs are queued up, we submit a new job
   60  struct v3d_job *job = to_v3d_job(sched_job);   in v3d_job_free() local
   63  v3d_job_put(job);   in v3d_job_free()
   67  v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)   in v3d_switch_perfmon() argument
   69  if (job->perfmon != v3d->active_perfmon)   in v3d_switch_perfmon()
   72  if (job->perfmon && v3d->active_perfmon != job->perfmon)   in v3d_switch_perfmon()
   73  v3d_perfmon_start(v3d, job->perfmon);   in v3d_switch_perfmon()
   77  * Returns the fences that the job depends on, one by one.
   86  struct v3d_job *job = to_v3d_job(sched_job);   in v3d_job_dependency() local
  [all …]
/Linux-v5.15/drivers/gpu/drm/v3d/v3d_gem.c

  169  * need to wait for completion before dispatching the job --   in v3d_flush_l2t()
  173  * synchronously clean after a job.   in v3d_flush_l2t()
  186  * signaling job completion. So, we synchronously wait before
  252  v3d_lock_bo_reservations(struct v3d_job *job,   in v3d_lock_bo_reservations() argument
  257  ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);   in v3d_lock_bo_reservations()
  261  for (i = 0; i < job->bo_count; i++) {   in v3d_lock_bo_reservations()
  262  ret = drm_gem_fence_array_add_implicit(&job->deps,   in v3d_lock_bo_reservations()
  263  job->bo[i], true);   in v3d_lock_bo_reservations()
  265  drm_gem_unlock_reservations(job->bo, job->bo_count,   in v3d_lock_bo_reservations()
  275  * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
  [all …]
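The v3d_gem.c hits show the usual v5.15 flow for locking a job's buffer objects and collecting their implicit fences as dependencies. A hedged sketch of that flow, assuming the drm_gem_lock_reservations()/drm_gem_fence_array_add_implicit()/drm_gem_unlock_reservations() helpers visible in the listing and a simplified job struct:

#include <linux/xarray.h>
#include <linux/ww_mutex.h>
#include <drm/drm_gem.h>

struct demo_v3d_job {
        struct drm_gem_object **bo;
        int bo_count;
        struct xarray deps;     /* fences this job must wait on */
};

static int demo_lock_bo_reservations(struct demo_v3d_job *job,
                                     struct ww_acquire_ctx *acquire_ctx)
{
        int i, ret;

        ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
        if (ret)
                return ret;

        for (i = 0; i < job->bo_count; i++) {
                /* Record everything previously queued on each BO as a dependency. */
                ret = drm_gem_fence_array_add_implicit(&job->deps, job->bo[i],
                                                       true);
                if (ret) {
                        drm_gem_unlock_reservations(job->bo, job->bo_count,
                                                    acquire_ctx);
                        return ret;
                }
        }

        return 0;
}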
/Linux-v5.15/block/bsg-lib.c

   31  struct bsg_job *job;   in bsg_transport_sg_io_fn() local
   48  job = blk_mq_rq_to_pdu(rq);   in bsg_transport_sg_io_fn()
   49  job->request_len = hdr->request_len;   in bsg_transport_sg_io_fn()
   50  job->request = memdup_user(uptr64(hdr->request), hdr->request_len);   in bsg_transport_sg_io_fn()
   51  if (IS_ERR(job->request)) {   in bsg_transport_sg_io_fn()
   52  ret = PTR_ERR(job->request);   in bsg_transport_sg_io_fn()
   57  job->bidi_rq = blk_get_request(rq->q, REQ_OP_DRV_IN, 0);   in bsg_transport_sg_io_fn()
   58  if (IS_ERR(job->bidi_rq)) {   in bsg_transport_sg_io_fn()
   59  ret = PTR_ERR(job->bidi_rq);   in bsg_transport_sg_io_fn()
   63  ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,   in bsg_transport_sg_io_fn()
  [all …]
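bsg-lib copies the transport request with memdup_user(), which reports failure through an ERR_PTR()-encoded pointer rather than NULL. A minimal, self-contained illustration of that idiom (demo_cmd is hypothetical):

#include <linux/err.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct demo_cmd {
        void *request;
        unsigned int request_len;
};

static int demo_copy_request(struct demo_cmd *cmd,
                             const void __user *ureq, unsigned int len)
{
        cmd->request_len = len;
        cmd->request = memdup_user(ureq, len);
        if (IS_ERR(cmd->request)) {
                int ret = PTR_ERR(cmd->request);    /* e.g. -EFAULT or -ENOMEM */

                cmd->request = NULL;
                return ret;
        }

        return 0;
}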
/Linux-v5.15/drivers/gpu/drm/panfrost/panfrost_job.c

  105  static int panfrost_job_get_slot(struct panfrost_job *job)   in panfrost_job_get_slot() argument
  111  if (job->requirements & PANFROST_JD_REQ_FS)   in panfrost_job_get_slot()
  116  if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {   in panfrost_job_get_slot()
  117  if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&   in panfrost_job_get_slot()
  118  (job->pfdev->features.nr_core_groups == 2))   in panfrost_job_get_slot()
  120  if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))   in panfrost_job_get_slot()
  145  panfrost_get_job_chain_flag(const struct panfrost_job *job)   in panfrost_get_job_chain_flag() argument
  147  struct panfrost_fence *f = to_panfrost_fence(job->done_fence);   in panfrost_get_job_chain_flag()
  149  if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))   in panfrost_get_job_chain_flag()
  158  struct panfrost_job *job = pfdev->jobs[slot][0];   in panfrost_dequeue_job() local
  [all …]
/Linux-v5.15/drivers/gpu/drm/panfrost/panfrost_drv.c

  113  * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
  114  * referenced by the job.
  118  * @job: job being set up
  120  * Resolve handles from userspace to BOs and attach them to job.
  129  struct panfrost_job *job)   in panfrost_lookup_bos() argument
  136  job->bo_count = args->bo_handle_count;   in panfrost_lookup_bos()
  138  if (!job->bo_count)   in panfrost_lookup_bos()
  143  job->bo_count, &job->bos);   in panfrost_lookup_bos()
  147  job->mappings = kvmalloc_array(job->bo_count,   in panfrost_lookup_bos()
  150  if (!job->mappings)   in panfrost_lookup_bos()
  [all …]
/Linux-v5.15/drivers/gpu/host1x/hw/channel_hw.c

   17  #include "../job.h"
   82  static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base)   in submit_gathers() argument
   84  struct host1x_cdma *cdma = &job->channel->cdma;   in submit_gathers()
   86  struct device *dev = job->channel->dev;   in submit_gathers()
   91  for (i = 0; i < job->num_cmds; i++) {   in submit_gathers()
   92  struct host1x_job_cmd *cmd = &job->cmds[i];   in submit_gathers()
  132  static inline void synchronize_syncpt_base(struct host1x_job *job)   in synchronize_syncpt_base() argument
  134  struct host1x_syncpt *sp = job->syncpt;   in synchronize_syncpt_base()
  141  host1x_cdma_push(&job->channel->cdma,   in synchronize_syncpt_base()
  162  static int channel_submit(struct host1x_job *job)   in channel_submit() argument
  [all …]
/Linux-v5.15/drivers/gpu/host1x/hw/debug_hw.c

  196  struct host1x_job *job;   in show_channel_gathers() local
  198  list_for_each_entry(job, &cdma->sync_queue, list) {   in show_channel_gathers()
  201  host1x_debug_output(o, "JOB, syncpt %u: %u timeout: %u num_slots: %u num_handles: %u\n",   in show_channel_gathers()
  202  job->syncpt->id, job->syncpt_end, job->timeout,   in show_channel_gathers()
  203  job->num_slots, job->num_unpins);   in show_channel_gathers()
  205  show_gather(o, pb->dma + job->first_get, job->num_slots * 2, cdma,   in show_channel_gathers()
  206  pb->dma + job->first_get, pb->mapped + job->first_get);   in show_channel_gathers()
  208  for (i = 0; i < job->num_cmds; i++) {   in show_channel_gathers()
  212  if (job->cmds[i].is_wait)   in show_channel_gathers()
  215  g = &job->cmds[i].gather;   in show_channel_gathers()
  [all …]
/Linux-v5.15/drivers/gpu/drm/drm_writeback.c

  262  int drm_writeback_prepare_job(struct drm_writeback_job *job)   in drm_writeback_prepare_job() argument
  264  struct drm_writeback_connector *connector = job->connector;   in drm_writeback_prepare_job()
  270  ret = funcs->prepare_writeback_job(connector, job);   in drm_writeback_prepare_job()
  275  job->prepared = true;   in drm_writeback_prepare_job()
  281  * drm_writeback_queue_job - Queue a writeback job for later signalling
  282  * @wb_connector: The writeback connector to queue a job on
  283  * @conn_state: The connector state containing the job to queue
  285  * This function adds the job contained in @conn_state to the job_queue for a
  286  * writeback connector. It takes ownership of the writeback job and sets the
  287  * @conn_state->writeback_job to NULL, and so no access to the job may be
  [all …]
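The kerneldoc above describes drm_writeback_queue_job() taking ownership of conn_state->writeback_job. A sketch of how a driver typically pairs it with drm_writeback_signal_completion(), assuming the v5.15 prototypes; the demo_* wrappers and call sites are illustrative, not taken from a real driver:

#include <drm/drm_writeback.h>
#include <drm/drm_connector.h>

/* In the atomic commit path: hand the job over to the core queue. */
static void demo_commit_writeback(struct drm_writeback_connector *wb_conn,
                                  struct drm_connector_state *conn_state)
{
        if (!conn_state->writeback_job)
                return;

        /* Takes ownership and clears conn_state->writeback_job. */
        drm_writeback_queue_job(wb_conn, conn_state);
}

/* In the "writeback done" interrupt handler: complete the oldest queued job. */
static void demo_writeback_irq(struct drm_writeback_connector *wb_conn,
                               int status)
{
        drm_writeback_signal_completion(wb_conn, status);
}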
/Linux-v5.15/drivers/scsi/lpfc/lpfc_bsg.c

   71  /* driver data associated with the job */
  104  struct bsg_job *set_job; /* job waiting for this iocb to finish */
  304  struct bsg_job *job;   in lpfc_bsg_send_mgmt_cmd_cmp() local
  316  /* Determine if job has been aborted */   in lpfc_bsg_send_mgmt_cmd_cmp()
  318  job = dd_data->set_job;   in lpfc_bsg_send_mgmt_cmd_cmp()
  319  if (job) {   in lpfc_bsg_send_mgmt_cmd_cmp()
  320  bsg_reply = job->reply;   in lpfc_bsg_send_mgmt_cmd_cmp()
  321  /* Prevent timeout handling from trying to abort job */   in lpfc_bsg_send_mgmt_cmd_cmp()
  322  job->dd_data = NULL;   in lpfc_bsg_send_mgmt_cmd_cmp()
  340  if (job) {   in lpfc_bsg_send_mgmt_cmd_cmp()
  [all …]
/Linux-v5.15/drivers/gpu/drm/scheduler/sched_main.c

   32  * backend operations to the scheduler like submitting a job to hardware run queue,
   33  * returning the dependencies of a job etc.
  121  * drm_sched_rq_select_entity - Select an entity which could provide a job to run
  165  * drm_sched_job_done - complete a job
  166  * @s_job: pointer to the job which is done
  168  * Finish the job's fence and wake up the worker thread.
  187  * drm_sched_job_done_cb - the callback for a done job
  252  * drm_sched_suspend_timeout - Suspend scheduler job timeout
  282  * drm_sched_resume_timeout - Resume scheduler job timeout
  316  struct drm_sched_job *job;   in drm_sched_job_timedout() local
  [all …]
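The comments above refer to the backend operations a driver registers with the scheduler. Below is a skeleton using the v5.15 drm_sched_backend_ops callback names; the bodies are placeholders (a real driver pushes the job to its ring and returns the hardware fence), so treat it as a hedged sketch rather than a working backend.

#include <drm/gpu_scheduler.h>

static struct dma_fence *demo_dependency(struct drm_sched_job *sched_job,
                                         struct drm_sched_entity *s_entity)
{
        return NULL;    /* no dependencies beyond those already attached */
}

static struct dma_fence *demo_run_job(struct drm_sched_job *sched_job)
{
        /* Submit to hardware here and return the fence it will signal. */
        return NULL;
}

static enum drm_gpu_sched_stat demo_timedout_job(struct drm_sched_job *sched_job)
{
        /* Reset or recover the hardware, then let the scheduler continue. */
        return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void demo_free_job(struct drm_sched_job *sched_job)
{
        /* Drop the driver's reference on its job wrapper. */
}

static const struct drm_sched_backend_ops demo_sched_ops = {
        .dependency     = demo_dependency,
        .run_job        = demo_run_job,
        .timedout_job   = demo_timedout_job,
        .free_job       = demo_free_job,
};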
/Linux-v5.15/drivers/gpu/drm/tegra/submit.c

   28  "%s: job submission failed: " fmt "\n", \
  303  static int submit_get_syncpt(struct tegra_drm_context *context, struct host1x_job *job,   in submit_get_syncpt() argument
  313  /* Syncpt ref will be dropped on job release */   in submit_get_syncpt()
  320  job->syncpt = host1x_syncpt_get(sp);   in submit_get_syncpt()
  321  job->syncpt_incrs = args->syncpt.increments;   in submit_get_syncpt()
  326  static int submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context,   in submit_job_add_gather() argument
  346  SUBMIT_ERR(context, "too many total words in job");   in submit_job_add_gather()
  357  SUBMIT_ERR(context, "job was rejected by firewall");   in submit_job_add_gather()
  361  host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4);   in submit_job_add_gather()
  375  struct host1x_job *job;   in submit_create_job() local
  [all …]
/Linux-v5.15/include/drm/gpu_scheduler.h

   50  * struct drm_sched_entity - A wrapper around a job queue (typically
   70  * @dependency: the dependency fence of the job which is on the top
   71  * of the job queue.
   75  * @last_scheduled: points to the finished fence of the last scheduled job.
   76  * @last_user: last group leader pushing a job into the entity.
  126  * struct drm_sched_fence - fences corresponding to the scheduling of a job.
  131  * when the job is scheduled.
  137  * when the job is completed.
  139  * When setting up an out fence for the job, you should use
  149  * when scheduling the job on hardware. We signal the
  [all …]
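The drm_sched_fence comments above distinguish the "scheduled" fence from the "finished" fence and say out-fences should use the latter. A tiny sketch of that choice, assuming the v5.15 layout where a submitted drm_sched_job carries an s_fence pointer:

#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>

static struct dma_fence *demo_job_out_fence(struct drm_sched_job *sched_job)
{
        /* "finished" signals when the hardware has completed the job. */
        return dma_fence_get(&sched_job->s_fence->finished);
}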
/Linux-v5.15/drivers/crypto/caam/Kconfig

   20  This module creates job ring devices, and configures h/w
   36  tristate "Freescale CAAM Job Ring driver backend"
   40  Enables the driver module for Job Rings which are part of
   42  and Assurance Module (CAAM). This module adds a job ring operation
   51  int "Job Ring size"
   55  Select size of Job Rings as a power of 2, within the
   68  bool "Job Ring interrupt coalescing"
   70  Enable the Job Ring's interrupt coalescing feature.
   76  int "Job Ring interrupt coalescing count threshold"
   84  equal or greater than the job ring size will force timeouts.
  [all …]
/Linux-v5.15/Documentation/devicetree/bindings/powerpc/fsl/raideng.txt

   30  There must be a sub-node for each job queue present in RAID Engine
   33  - compatible: Should contain "fsl,raideng-v1.0-job-queue" as the value
   34  This identifies the job queue interface
   35  - reg: offset and length of the register set for job queue
   42  compatible = "fsl,raideng-v1.0-job-queue";
   48  There must be a sub-node for each job ring present in RAID Engine
   49  This node must be a sub-node of job queue node
   51  - compatible: Must contain "fsl,raideng-v1.0-job-ring" as the value
   52  This identifies job ring. Should contain either
   55  - reg: offset and length of the register set for job ring
  [all …]
/Linux-v5.15/Documentation/core-api/padata.rst

   14  is currently the sole consumer of padata's serialized job support.
   16  Padata also supports multithreaded jobs, splitting up the job evenly while load
   38  A padata_shell is used to submit a job to padata and allows a series of such
   80  Running A Job
   84  padata_priv structure, which represents one job::
   99  The submission of the job is done with::
  105  points to the preferred CPU to be used for the final callback when the job is
  108  padata_do_parallel() is zero on success, indicating that the job is in
  114  Each job submitted to padata_do_parallel() will, in turn, be passed to
  123  parallel() will take responsibility for the job from this point. The job
  [all …]
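The padata document excerpted above describes wrapping work in a padata_priv and submitting it with padata_do_parallel(). A hedged sketch of that flow against the v5.15 padata API (instance/shell setup and error handling omitted; the demo_* names are invented):

#include <linux/padata.h>
#include <linux/kernel.h>

struct demo_work {
        struct padata_priv padata;      /* embedded, recovered via container_of */
        int payload;
};

static void demo_parallel(struct padata_priv *padata)
{
        struct demo_work *w = container_of(padata, struct demo_work, padata);

        w->payload *= 2;                /* the parallelizable part of the job */
        padata_do_serial(padata);       /* hand back for in-order completion */
}

static void demo_serial(struct padata_priv *padata)
{
        /* Runs in submission order on the chosen callback CPU. */
}

static int demo_submit(struct padata_shell *ps, struct demo_work *w, int cb_cpu)
{
        w->padata.parallel = demo_parallel;
        w->padata.serial = demo_serial;

        /* Returns 0 once the job has been queued for parallel execution. */
        return padata_do_parallel(ps, &w->padata, &cb_cpu);
}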
/Linux-v5.15/drivers/scsi/ufs/ufs_bsg.c

   42  static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job,   in ufs_bsg_alloc_desc_buffer() argument
   46  struct ufs_bsg_request *bsg_request = job->request;   in ufs_bsg_alloc_desc_buffer()
   60  if (*desc_len > job->request_payload.payload_len) {   in ufs_bsg_alloc_desc_buffer()
   70  sg_copy_to_buffer(job->request_payload.sg_list,   in ufs_bsg_alloc_desc_buffer()
   71  job->request_payload.sg_cnt, descp,   in ufs_bsg_alloc_desc_buffer()
   80  static int ufs_bsg_request(struct bsg_job *job)   in ufs_bsg_request() argument
   82  struct ufs_bsg_request *bsg_request = job->request;   in ufs_bsg_request()
   83  struct ufs_bsg_reply *bsg_reply = job->reply;   in ufs_bsg_request()
   84  struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));   in ufs_bsg_request()
   85  unsigned int req_len = job->request_len;   in ufs_bsg_request()
  [all …]
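ufs_bsg_alloc_desc_buffer() above pulls the caller's descriptor out of the bsg request payload scatterlist. A reduced sketch of that copy step, assuming the bsg_job request_payload fields visible in the listing (error paths trimmed, demo_ naming hypothetical):

#include <linux/bsg-lib.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int demo_copy_bsg_payload(struct bsg_job *job, void **descp,
                                 unsigned int desc_len)
{
        void *buf;

        if (desc_len > job->request_payload.payload_len)
                return -EINVAL;         /* caller did not supply enough data */

        buf = kzalloc(desc_len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* Gather the user-provided payload into a flat kernel buffer. */
        sg_copy_to_buffer(job->request_payload.sg_list,
                          job->request_payload.sg_cnt, buf, desc_len);

        *descp = buf;
        return 0;
}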
/Linux-v5.15/drivers/misc/habanalabs/common/hw_queue.c

  218  * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
  261  * ext_queue_schedule_job - submit a JOB to an external queue
  263  * @job: pointer to the job that needs to be submitted to the queue
  268  static void ext_queue_schedule_job(struct hl_cs_job *job)   in ext_queue_schedule_job() argument
  270  struct hl_device *hdev = job->cs->ctx->hdev;   in ext_queue_schedule_job()
  271  struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];   in ext_queue_schedule_job()
  281  * Update the JOB ID inside the BD CTL so the device would know what   in ext_queue_schedule_job()
  286  cb = job->patched_cb;   in ext_queue_schedule_job()
  287  len = job->job_cb_size;   in ext_queue_schedule_job()
  291  if (!cs_needs_completion(job->cs))   in ext_queue_schedule_job()
  [all …]
/Linux-v5.15/Documentation/devicetree/bindings/crypto/fsl-sec6.txt

    5  -Job Ring Node
   72  Job Ring (JR) Node
   84  Definition: Must include "fsl,sec-v6.0-job-ring".
  103  compatible = "fsl,sec-v6.0-job-ring";
  123  compatible = "fsl,sec-v6.0-job-ring",
  124  "fsl,sec-v5.2-job-ring",
  125  "fsl,sec-v5.0-job-ring",
  126  "fsl,sec-v4.4-job-ring",
  127  "fsl,sec-v4.0-job-ring";
  132  compatible = "fsl,sec-v6.0-job-ring",
  [all …]
/Linux-v5.15/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi

   42  compatible = "fsl,sec-v6.0-job-ring",
   43  "fsl,sec-v5.2-job-ring",
   44  "fsl,sec-v5.0-job-ring",
   45  "fsl,sec-v4.4-job-ring",
   46  "fsl,sec-v4.0-job-ring";
   51  compatible = "fsl,sec-v6.0-job-ring",
   52  "fsl,sec-v5.2-job-ring",
   53  "fsl,sec-v5.0-job-ring",
   54  "fsl,sec-v4.4-job-ring",
   55  "fsl,sec-v4.0-job-ring";