
Searched full:job (Results 1 – 25 of 649) sorted by relevance


/Linux-v6.1/drivers/gpu/host1x/
job.c
3 * Tegra host1x Job
21 #include "job.h"
30 struct host1x_job *job = NULL; in host1x_job_alloc() local
51 mem = job = kzalloc(total, GFP_KERNEL); in host1x_job_alloc()
52 if (!job) in host1x_job_alloc()
55 job->enable_firewall = enable_firewall; in host1x_job_alloc()
57 kref_init(&job->ref); in host1x_job_alloc()
58 job->channel = ch; in host1x_job_alloc()
62 job->relocs = num_relocs ? mem : NULL; in host1x_job_alloc()
64 job->unpins = num_unpins ? mem : NULL; in host1x_job_alloc()
[all …]
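
The host1x_job_alloc() lines above show a common kernel idiom: the job struct and its trailing reloc/unpin arrays come from one kzalloc(), so a single kfree() tears everything down. A minimal sketch of that pattern, with hypothetical struct and field names (not the real host1x definitions):

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_job {
        struct kref ref;
        u64 *relocs;    /* points into the same allocation */
        u64 *unpins;
    };

    static struct demo_job *demo_job_alloc(unsigned int num_relocs,
                                           unsigned int num_unpins)
    {
        /* a production version would check this sum for overflow */
        size_t total = sizeof(struct demo_job) +
                       num_relocs * sizeof(u64) +
                       num_unpins * sizeof(u64);
        struct demo_job *job;
        void *mem;

        /* one zeroed allocation covers the struct and both arrays */
        mem = job = kzalloc(total, GFP_KERNEL);
        if (!job)
            return NULL;

        kref_init(&job->ref);

        /* carve the trailing storage; NULL when a count is zero */
        mem += sizeof(struct demo_job);
        job->relocs = num_relocs ? mem : NULL;
        mem += num_relocs * sizeof(u64);
        job->unpins = num_unpins ? mem : NULL;

        return job;
    }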
cdma.c
23 #include "job.h"
270 * Start timer that tracks the time spent by the job.
274 struct host1x_job *job) in cdma_start_timer_locked() argument
281 cdma->timeout.client = job->client; in cdma_start_timer_locked()
282 cdma->timeout.syncpt = job->syncpt; in cdma_start_timer_locked()
283 cdma->timeout.syncpt_val = job->syncpt_end; in cdma_start_timer_locked()
287 msecs_to_jiffies(job->timeout)); in cdma_start_timer_locked()
313 struct host1x_job *job, *n; in update_cdma_locked() local
319 list_for_each_entry_safe(job, n, &cdma->sync_queue, list) { in update_cdma_locked()
320 struct host1x_syncpt *sp = job->syncpt; in update_cdma_locked()
[all …]
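
update_cdma_locked() above walks the sync queue with list_for_each_entry_safe(), which caches the next node so the current job can be unlinked and freed mid-walk. A sketch of that reap loop, with stand-in types:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_job {
        struct list_head list;
        bool done;
    };

    static void demo_reap_done(struct list_head *sync_queue)
    {
        struct demo_job *job, *n;

        /* the "_safe" variant lets us delete the entry we stand on */
        list_for_each_entry_safe(job, n, sync_queue, list) {
            if (!job->done)
                break;  /* assuming the queue is completion-ordered */
            list_del(&job->list);
            kfree(job);
        }
    }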
/Linux-v6.1/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c
37 struct amdgpu_job *job = to_amdgpu_job(s_job); in amdgpu_job_timedout() local
47 /* Effectively the job is aborted as the device is gone */ in amdgpu_job_timedout()
55 amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { in amdgpu_job_timedout()
61 amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti); in amdgpu_job_timedout()
63 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), in amdgpu_job_timedout()
76 r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context); in amdgpu_job_timedout()
92 struct amdgpu_job **job, struct amdgpu_vm *vm) in amdgpu_job_alloc() argument
97 *job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL); in amdgpu_job_alloc()
98 if (!*job) in amdgpu_job_alloc()
105 (*job)->base.sched = &adev->rings[0]->sched; in amdgpu_job_alloc()
[all …]
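
amdgpu_job_alloc() above sizes its allocation with struct_size(), the overflow-safe helper for structs that end in a flexible array member. A sketch of the same shape with an illustrative struct (not the real amdgpu_job):

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_ib {
        u64 gpu_addr;
        u32 length_dw;
    };

    struct demo_job {
        unsigned int num_ibs;
        struct demo_ib ibs[];   /* flexible array member */
    };

    static struct demo_job *demo_job_alloc(unsigned int num_ibs)
    {
        struct demo_job *job;

        /* struct_size() saturates on overflow instead of wrapping */
        job = kzalloc(struct_size(job, ibs, num_ibs), GFP_KERNEL);
        if (!job)
            return NULL;

        job->num_ibs = num_ibs;
        return job;
    }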
amdgpu_job.h
42 #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0) argument
69 /* job_run_counter >= 1 means a resubmit job */
76 static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job) in amdgpu_job_ring() argument
78 return to_amdgpu_ring(job->base.entity->rq->sched); in amdgpu_job_ring()
82 struct amdgpu_job **job, struct amdgpu_vm *vm);
84 enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
85 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
87 void amdgpu_job_free_resources(struct amdgpu_job *job);
88 void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
90 void amdgpu_job_free(struct amdgpu_job *job);
[all …]
amdgpu_ib.c
110 * @job: job to schedule
127 struct amdgpu_ib *ibs, struct amdgpu_job *job, in amdgpu_ib_schedule() argument
148 /* ring tests don't use a job */ in amdgpu_ib_schedule()
149 if (job) { in amdgpu_ib_schedule()
150 vm = job->vm; in amdgpu_ib_schedule()
151 fence_ctx = job->base.s_fence ? in amdgpu_ib_schedule()
152 job->base.s_fence->scheduled.context : 0; in amdgpu_ib_schedule()
163 if (vm && !job->vmid && !ring->is_mes_queue) { in amdgpu_ib_schedule()
184 if (ring->funcs->emit_pipeline_sync && job && in amdgpu_ib_schedule()
185 ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) || in amdgpu_ib_schedule()
[all …]
/Linux-v6.1/drivers/md/
dm-kcopyd.c
40 MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
347 * Error state of the job.
367 * Set this to ensure you are notified when the job has
374 * These fields are only used if the job has been split
408 * Functions to push and pop a job onto the head of a given job
414 struct kcopyd_job *job; in pop_io_job() local
420 list_for_each_entry(job, jobs, list) { in pop_io_job()
421 if (job->op == REQ_OP_READ || in pop_io_job()
422 !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) { in pop_io_job()
423 list_del(&job->list); in pop_io_job()
[all …]
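
pop_io_job() above picks the first job that is either a read or a write not marked sequential, so ordered writes do not starve other I/O. A sketch of that selection with illustrative names (DEMO_WRITE_SEQ stands in for DM_KCOPYD_WRITE_SEQ):

    #include <linux/bits.h>
    #include <linux/blk_types.h>
    #include <linux/list.h>

    #define DEMO_WRITE_SEQ  0   /* stand-in for DM_KCOPYD_WRITE_SEQ */

    struct demo_job {
        struct list_head list;
        enum req_op op;
        unsigned long flags;
    };

    static struct demo_job *demo_pop_io_job(struct list_head *jobs)
    {
        struct demo_job *job;

        /* reads can always go; sequential writes must leave in order */
        list_for_each_entry(job, jobs, list) {
            if (job->op == REQ_OP_READ ||
                !(job->flags & BIT(DEMO_WRITE_SEQ))) {
                list_del(&job->list);
                return job;
            }
        }

        return NULL;
    }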
/Linux-v6.1/drivers/gpu/drm/v3d/
v3d_sched.c
10 * scheduler will round-robin between clients to submit the next job.
13 * jobs when bulk background jobs are queued up, we submit a new job
60 struct v3d_job *job = to_v3d_job(sched_job); in v3d_sched_job_free() local
62 v3d_job_cleanup(job); in v3d_sched_job_free()
66 v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job) in v3d_switch_perfmon() argument
68 if (job->perfmon != v3d->active_perfmon) in v3d_switch_perfmon()
71 if (job->perfmon && v3d->active_perfmon != job->perfmon) in v3d_switch_perfmon()
72 v3d_perfmon_start(v3d, job->perfmon); in v3d_switch_perfmon()
77 struct v3d_bin_job *job = to_bin_job(sched_job); in v3d_bin_job_run() local
78 struct v3d_dev *v3d = job->base.v3d; in v3d_bin_job_run()
[all …]
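
v3d_switch_perfmon() above stops the active performance monitor only when the incoming job carries a different one. A sketch of that compare-and-switch; all types and helpers here are stand-ins:

    struct demo_perfmon { int id; };
    struct demo_dev { struct demo_perfmon *active_perfmon; };
    struct demo_job { struct demo_perfmon *perfmon; };

    static void demo_perfmon_stop(struct demo_dev *dev)
    {
        dev->active_perfmon = NULL;     /* a real driver halts counters */
    }

    static void demo_perfmon_start(struct demo_dev *dev,
                                   struct demo_perfmon *pm)
    {
        dev->active_perfmon = pm;       /* a real driver programs counters */
    }

    static void demo_switch_perfmon(struct demo_dev *dev,
                                    struct demo_job *job)
    {
        /* fast path: the job reuses whatever is already active */
        if (job->perfmon == dev->active_perfmon)
            return;

        if (dev->active_perfmon)
            demo_perfmon_stop(dev);

        if (job->perfmon)
            demo_perfmon_start(dev, job->perfmon);
    }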
v3d_gem.c
168 * need to wait for completion before dispatching the job -- in v3d_flush_l2t()
172 * synchronously clean after a job. in v3d_flush_l2t()
185 * signaling job completion. So, we synchronously wait before
251 v3d_lock_bo_reservations(struct v3d_job *job, in v3d_lock_bo_reservations() argument
256 ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx); in v3d_lock_bo_reservations()
260 for (i = 0; i < job->bo_count; i++) { in v3d_lock_bo_reservations()
261 ret = dma_resv_reserve_fences(job->bo[i]->resv, 1); in v3d_lock_bo_reservations()
265 ret = drm_sched_job_add_implicit_dependencies(&job->base, in v3d_lock_bo_reservations()
266 job->bo[i], true); in v3d_lock_bo_reservations()
274 drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); in v3d_lock_bo_reservations()
[all …]
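
v3d_lock_bo_reservations() above locks every BO reservation in one ww-mutex transaction, reserves a fence slot per BO, and attaches implicit dependencies, unwinding with a single unlock on failure. A sketch of that sequence, assuming the v6.1 GEM and scheduler helpers:

    #include <drm/drm_gem.h>
    #include <drm/gpu_scheduler.h>
    #include <linux/dma-resv.h>

    static int demo_lock_bo_reservations(struct drm_gem_object **bo,
                                         int bo_count,
                                         struct drm_sched_job *sched_job,
                                         struct ww_acquire_ctx *acquire_ctx)
    {
        int i, ret;

        ret = drm_gem_lock_reservations(bo, bo_count, acquire_ctx);
        if (ret)
            return ret;

        for (i = 0; i < bo_count; i++) {
            /* make room for the job's completion fence */
            ret = dma_resv_reserve_fences(bo[i]->resv, 1);
            if (ret)
                goto fail;

            /* true: treat the job as writing the BO */
            ret = drm_sched_job_add_implicit_dependencies(sched_job,
                                                          bo[i], true);
            if (ret)
                goto fail;
        }

        return 0;

    fail:
        drm_gem_unlock_reservations(bo, bo_count, acquire_ctx);
        return ret;
    }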
/Linux-v6.1/block/
bsg-lib.c
31 struct bsg_job *job; in bsg_transport_sg_io_fn() local
49 job = blk_mq_rq_to_pdu(rq); in bsg_transport_sg_io_fn()
50 reply = job->reply; in bsg_transport_sg_io_fn()
51 memset(job, 0, sizeof(*job)); in bsg_transport_sg_io_fn()
52 job->reply = reply; in bsg_transport_sg_io_fn()
53 job->reply_len = SCSI_SENSE_BUFFERSIZE; in bsg_transport_sg_io_fn()
54 job->dd_data = job + 1; in bsg_transport_sg_io_fn()
56 job->request_len = hdr->request_len; in bsg_transport_sg_io_fn()
57 job->request = memdup_user(uptr64(hdr->request), hdr->request_len); in bsg_transport_sg_io_fn()
58 if (IS_ERR(job->request)) { in bsg_transport_sg_io_fn()
[all …]
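
bsg_transport_sg_io_fn() above reinitialises the bsg_job embedded in the request PDU: the preallocated reply pointer survives the memset, and driver-private data lives immediately after the struct ("job + 1"). A sketch of that setup, assuming the v6.1 bsg-lib layout:

    #include <linux/blk-mq.h>
    #include <linux/bsg-lib.h>
    #include <linux/string.h>
    #include <scsi/scsi_cmnd.h>

    static struct bsg_job *demo_init_bsg_job(struct request *rq)
    {
        struct bsg_job *job = blk_mq_rq_to_pdu(rq);
        void *reply = job->reply;       /* allocated once, reused */

        memset(job, 0, sizeof(*job));
        job->reply = reply;
        job->reply_len = SCSI_SENSE_BUFFERSIZE;
        job->dd_data = job + 1;         /* driver data trails the struct */

        return job;
    }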
/Linux-v6.1/drivers/gpu/host1x/hw/
channel_hw.c
17 #include "../job.h"
50 static void submit_wait(struct host1x_job *job, u32 id, u32 threshold, in submit_wait() argument
53 struct host1x_cdma *cdma = &job->channel->cdma; in submit_wait()
63 if (job->memory_context) in submit_wait()
64 stream_id = job->memory_context->stream_id; in submit_wait()
66 stream_id = job->engine_fallback_streamid; in submit_wait()
79 host1x_cdma_push_wide(&job->channel->cdma, in submit_wait()
80 host1x_opcode_setclass(job->class, 0, 0), in submit_wait()
82 host1x_opcode_setstreamid(job->engine_streamid_offset / 4), in submit_wait()
113 static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base) in submit_gathers() argument
[all …]
debug_hw.c
196 struct host1x_job *job; in show_channel_gathers() local
198 list_for_each_entry(job, &cdma->sync_queue, list) { in show_channel_gathers()
201 host1x_debug_output(o, "JOB, syncpt %u: %u timeout: %u num_slots: %u num_handles: %u\n", in show_channel_gathers()
202 job->syncpt->id, job->syncpt_end, job->timeout, in show_channel_gathers()
203 job->num_slots, job->num_unpins); in show_channel_gathers()
205 show_gather(o, pb->dma + job->first_get, job->num_slots * 2, cdma, in show_channel_gathers()
206 pb->dma + job->first_get, pb->mapped + job->first_get); in show_channel_gathers()
208 for (i = 0; i < job->num_cmds; i++) { in show_channel_gathers()
212 if (job->cmds[i].is_wait) in show_channel_gathers()
215 g = &job->cmds[i].gather; in show_channel_gathers()
[all …]
/Linux-v6.1/drivers/gpu/drm/panfrost/
panfrost_job.c
106 int panfrost_job_get_slot(struct panfrost_job *job) in panfrost_job_get_slot() argument
112 if (job->requirements & PANFROST_JD_REQ_FS) in panfrost_job_get_slot()
117 if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) { in panfrost_job_get_slot()
118 if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) && in panfrost_job_get_slot()
119 (job->pfdev->features.nr_core_groups == 2)) in panfrost_job_get_slot()
121 if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987)) in panfrost_job_get_slot()
146 panfrost_get_job_chain_flag(const struct panfrost_job *job) in panfrost_get_job_chain_flag() argument
148 struct panfrost_fence *f = to_panfrost_fence(job->done_fence); in panfrost_get_job_chain_flag()
150 if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) in panfrost_get_job_chain_flag()
159 struct panfrost_job *job = pfdev->jobs[slot][0]; in panfrost_dequeue_job() local
[all …]
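
panfrost_job_get_slot() above routes fragment jobs to slot 0, steers compute-only jobs to slot 2 on parts with two core groups, and defaults to slot 1. A sketch of that decision with illustrative flag values (not the real UAPI constants):

    #include <linux/bits.h>
    #include <linux/types.h>

    #define DEMO_REQ_FS             BIT(0)
    #define DEMO_REQ_ONLY_COMPUTE   BIT(1)
    #define DEMO_REQ_CORE_GRP_MASK  BIT(2)

    static int demo_job_get_slot(u32 requirements,
                                 unsigned int nr_core_groups)
    {
        /* fragment shader jobs have a dedicated slot */
        if (requirements & DEMO_REQ_FS)
            return 0;

        /* compute jobs pinned to a core group use the third slot */
        if ((requirements & DEMO_REQ_ONLY_COMPUTE) &&
            (requirements & DEMO_REQ_CORE_GRP_MASK) &&
            nr_core_groups == 2)
            return 2;

        return 1;
    }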
panfrost_drv.c
113 * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
114 * referenced by the job.
118 * @job: job being set up
120 * Resolve handles from userspace to BOs and attach them to job.
129 struct panfrost_job *job) in panfrost_lookup_bos() argument
136 job->bo_count = args->bo_handle_count; in panfrost_lookup_bos()
138 if (!job->bo_count) in panfrost_lookup_bos()
143 job->bo_count, &job->bos); in panfrost_lookup_bos()
147 job->mappings = kvmalloc_array(job->bo_count, in panfrost_lookup_bos()
150 if (!job->mappings) in panfrost_lookup_bos()
[all …]
/Linux-v6.1/drivers/gpu/drm/scheduler/
sched_main.c
32 * backend operations to the scheduler like submitting a job to hardware run queue,
33 * returning the dependencies of a job etc.
123 * drm_sched_rq_select_entity - Select an entity which could provide a job to run
167 * drm_sched_job_done - complete a job
168 * @s_job: pointer to the job which is done
170 * Finish the job's fence and wake up the worker thread.
189 * drm_sched_job_done_cb - the callback for a done job
254 * drm_sched_suspend_timeout - Suspend scheduler job timeout
284 * drm_sched_resume_timeout - Resume scheduler job timeout
318 struct drm_sched_job *job; in drm_sched_job_timedout() local
[all …]
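
The comments above describe the contract between a driver and the DRM scheduler. A sketch of the backend ops a v6.1 driver registers; the callbacks here are stubs standing in for real hardware submission and recovery:

    #include <drm/gpu_scheduler.h>

    static struct dma_fence *demo_run_job(struct drm_sched_job *sched_job)
    {
        /* push the job to the hardware ring, return its hardware fence */
        return NULL;    /* NULL in this stub: nothing to wait for */
    }

    static enum drm_gpu_sched_stat
    demo_timedout_job(struct drm_sched_job *sched_job)
    {
        /* reset the hardware, then resubmit or abandon the job */
        return DRM_GPU_SCHED_STAT_NOMINAL;
    }

    static void demo_free_job(struct drm_sched_job *sched_job)
    {
        /* drop the driver's references once the job is done */
    }

    static const struct drm_sched_backend_ops demo_sched_ops = {
        .run_job        = demo_run_job,
        .timedout_job   = demo_timedout_job,
        .free_job       = demo_free_job,
    };

The fence returned by run_job() is what the scheduler watches to declare the job finished or, failing that, timed out.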
sched_entity.c
45 * @guilty: atomic_t set to 1 when a job on this queue
130 /* Return true if entity could provide a job. */
195 struct drm_sched_job *job = container_of(wrk, typeof(*job), work); in drm_sched_entity_kill_jobs_work() local
197 drm_sched_fence_finished(job->s_fence); in drm_sched_entity_kill_jobs_work()
198 WARN_ON(job->s_fence->parent); in drm_sched_entity_kill_jobs_work()
199 job->sched->ops->free_job(job); in drm_sched_entity_kill_jobs_work()
207 struct drm_sched_job *job = container_of(cb, struct drm_sched_job, in drm_sched_entity_kill_jobs_cb() local
211 INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work); in drm_sched_entity_kill_jobs_cb()
212 schedule_work(&job->work); in drm_sched_entity_kill_jobs_cb()
216 drm_sched_job_dependency(struct drm_sched_job *job, in drm_sched_job_dependency() argument
[all …]
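
drm_sched_entity_kill_jobs_cb() above cannot free the job directly, because dma_fence callbacks may run in atomic context; it bounces the teardown to a workqueue. A sketch of that deferral pattern with stand-in types:

    #include <linux/container_of.h>
    #include <linux/dma-fence.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct demo_job {
        struct dma_fence_cb cb;
        struct work_struct work;
    };

    static void demo_kill_job_work(struct work_struct *wrk)
    {
        struct demo_job *job = container_of(wrk, struct demo_job, work);

        /* process context: safe to sleep and free here */
        kfree(job);
    }

    static void demo_kill_job_cb(struct dma_fence *f,
                                 struct dma_fence_cb *cb)
    {
        struct demo_job *job = container_of(cb, struct demo_job, cb);

        /* fence callbacks may run in irq context: defer the teardown */
        INIT_WORK(&job->work, demo_kill_job_work);
        schedule_work(&job->work);
    }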
/Linux-v6.1/drivers/gpu/drm/tegra/
submit.c
28 "%s: job submission failed: " fmt "\n", \
332 static int submit_get_syncpt(struct tegra_drm_context *context, struct host1x_job *job, in submit_get_syncpt() argument
342 /* Syncpt ref will be dropped on job release */ in submit_get_syncpt()
349 job->syncpt = host1x_syncpt_get(sp); in submit_get_syncpt()
350 job->syncpt_incrs = args->syncpt.increments; in submit_get_syncpt()
355 static int submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context, in submit_job_add_gather() argument
375 SUBMIT_ERR(context, "too many total words in job"); in submit_job_add_gather()
386 SUBMIT_ERR(context, "job was rejected by firewall"); in submit_job_add_gather()
390 host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4); in submit_job_add_gather()
404 struct host1x_job *job; in submit_create_job() local
[all …]
/Linux-v6.1/drivers/scsi/lpfc/
lpfc_bsg.c
71 /* driver data associated with the job */
96 struct bsg_job *set_job; /* job waiting for this iocb to finish */
295 struct bsg_job *job; in lpfc_bsg_send_mgmt_cmd_cmp() local
306 /* Determine if job has been aborted */ in lpfc_bsg_send_mgmt_cmd_cmp()
308 job = dd_data->set_job; in lpfc_bsg_send_mgmt_cmd_cmp()
309 if (job) { in lpfc_bsg_send_mgmt_cmd_cmp()
310 bsg_reply = job->reply; in lpfc_bsg_send_mgmt_cmd_cmp()
311 /* Prevent timeout handling from trying to abort job */ in lpfc_bsg_send_mgmt_cmd_cmp()
312 job->dd_data = NULL; in lpfc_bsg_send_mgmt_cmd_cmp()
332 if (job) { in lpfc_bsg_send_mgmt_cmd_cmp()
[all …]
/Linux-v6.1/drivers/gpu/drm/
drm_writeback.c
312 int drm_writeback_prepare_job(struct drm_writeback_job *job) in drm_writeback_prepare_job() argument
314 struct drm_writeback_connector *connector = job->connector; in drm_writeback_prepare_job()
320 ret = funcs->prepare_writeback_job(connector, job); in drm_writeback_prepare_job()
325 job->prepared = true; in drm_writeback_prepare_job()
331 * drm_writeback_queue_job - Queue a writeback job for later signalling
332 * @wb_connector: The writeback connector to queue a job on
333 * @conn_state: The connector state containing the job to queue
335 * This function adds the job contained in @conn_state to the job_queue for a
336 * writeback connector. It takes ownership of the writeback job and sets the
337 * @conn_state->writeback_job to NULL, and so no access to the job may be
[all …]
/Linux-v6.1/include/drm/
gpu_scheduler.h
63 * struct drm_sched_entity - A wrapper around a job queue (typically
160 * The dependency fence of the job which is on the top of the job queue.
181 * Points to the finished fence of the last scheduled job. Only written
188 * @last_user: last group leader pushing a job into the entity.
230 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
235 * when the job is scheduled.
241 * when the job is completed.
243 * When setting up an out fence for the job, you should use
253 * when scheduling the job on hardware. We signal the
258 * @sched: the scheduler instance to which the job having this struct
[all …]
/Linux-v6.1/Documentation/devicetree/bindings/powerpc/fsl/
raideng.txt
30 There must be a sub-node for each job queue present in RAID Engine
33 - compatible: Should contain "fsl,raideng-v1.0-job-queue" as the value
34 This identifies the job queue interface
35 - reg: offset and length of the register set for job queue
42 compatible = "fsl,raideng-v1.0-job-queue";
48 There must be a sub-node for each job ring present in RAID Engine
49 This node must be a sub-node of job queue node
51 - compatible: Must contain "fsl,raideng-v1.0-job-ring" as the value
52 This identifies job ring. Should contain either
55 - reg: offset and length of the register set for job ring
[all …]
/Linux-v6.1/drivers/crypto/caam/
Kconfig
20 This module creates job ring devices, and configures h/w
36 tristate "Freescale CAAM Job Ring driver backend"
40 Enables the driver module for Job Rings which are part of
42 and Assurance Module (CAAM). This module adds a job ring operation
51 int "Job Ring size"
55 Select size of Job Rings as a power of 2, within the
68 bool "Job Ring interrupt coalescing"
70 Enable the Job Ring's interrupt coalescing feature.
76 int "Job Ring interrupt coalescing count threshold"
84 equal or greater than the job ring size will force timeouts.
[all …]
/Linux-v6.1/Documentation/core-api/
padata.rst
14 is currently the sole consumer of padata's serialized job support.
16 Padata also supports multithreaded jobs, splitting up the job evenly while load
38 A padata_shell is used to submit a job to padata and allows a series of such
80 Running A Job
84 padata_priv structure, which represents one job::
99 The submission of the job is done with::
105 points to the preferred CPU to be used for the final callback when the job is
108 padata_do_parallel() is zero on success, indicating that the job is in
114 Each job submitted to padata_do_parallel() will, in turn, be passed to
123 parallel() will take responsibility for the job from this point. The job
[all …]
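
The padata excerpt above describes submitting one job as a padata_priv with parallel() and serial() callbacks. A minimal sketch of that flow, assuming a padata_shell already set up as the document describes:

    #include <linux/padata.h>

    struct demo_job {
        struct padata_priv padata;
        int input, result;
    };

    static void demo_parallel(struct padata_priv *padata)
    {
        struct demo_job *job = container_of(padata, struct demo_job,
                                            padata);

        job->result = job->input * 2;   /* the parallelisable work */
        padata_do_serial(padata);       /* hand back for ordered completion */
    }

    static void demo_serial(struct padata_priv *padata)
    {
        /* runs in submission order on the chosen callback CPU */
    }

    static int demo_submit(struct padata_shell *ps, struct demo_job *job)
    {
        /* preferred CPU for serial(); must be in the instance's cpumask */
        int cb_cpu = 0;

        job->padata.parallel = demo_parallel;
        job->padata.serial = demo_serial;

        /* zero on success: the job is now in progress */
        return padata_do_parallel(ps, &job->padata, &cb_cpu);
    }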
/Linux-v6.1/drivers/ufs/core/
ufs_bsg.c
48 static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job, in ufs_bsg_alloc_desc_buffer() argument
52 struct ufs_bsg_request *bsg_request = job->request; in ufs_bsg_alloc_desc_buffer()
66 if (*desc_len > job->request_payload.payload_len) { in ufs_bsg_alloc_desc_buffer()
76 sg_copy_to_buffer(job->request_payload.sg_list, in ufs_bsg_alloc_desc_buffer()
77 job->request_payload.sg_cnt, descp, in ufs_bsg_alloc_desc_buffer()
86 static int ufs_bsg_request(struct bsg_job *job) in ufs_bsg_request() argument
88 struct ufs_bsg_request *bsg_request = job->request; in ufs_bsg_request()
89 struct ufs_bsg_reply *bsg_reply = job->reply; in ufs_bsg_request()
90 struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent)); in ufs_bsg_request()
91 unsigned int req_len = job->request_len; in ufs_bsg_request()
[all …]
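
ufs_bsg_alloc_desc_buffer() above checks the requested descriptor length against the BSG payload before linearising the scatter-gather list. A sketch of that staging step, assuming the v6.1 bsg_job layout:

    #include <linux/bsg-lib.h>
    #include <linux/errno.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static int demo_copy_bsg_payload(struct bsg_job *job, void **descp,
                                     u16 desc_len)
    {
        /* reject requests larger than what userspace actually sent */
        if (desc_len > job->request_payload.payload_len)
            return -EINVAL;

        *descp = kzalloc(desc_len, GFP_KERNEL);
        if (!*descp)
            return -ENOMEM;

        /* gather the scattered payload into one linear buffer */
        sg_copy_to_buffer(job->request_payload.sg_list,
                          job->request_payload.sg_cnt, *descp, desc_len);
        return 0;
    }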
/Linux-v6.1/drivers/misc/habanalabs/common/
hw_queue.c
218 * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
261 * ext_queue_schedule_job - submit a JOB to an external queue
263 * @job: pointer to the job that needs to be submitted to the queue
268 static void ext_queue_schedule_job(struct hl_cs_job *job) in ext_queue_schedule_job() argument
270 struct hl_device *hdev = job->cs->ctx->hdev; in ext_queue_schedule_job()
271 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; in ext_queue_schedule_job()
281 * Update the JOB ID inside the BD CTL so the device would know what in ext_queue_schedule_job()
286 cb = job->patched_cb; in ext_queue_schedule_job()
287 len = job->job_cb_size; in ext_queue_schedule_job()
291 if (!cs_needs_completion(job->cs)) in ext_queue_schedule_job()
[all …]
/Linux-v6.1/drivers/scsi/mpi3mr/
mpi3mr_app.c
131 * @job: BSG job reference
140 struct bsg_job *job) in mpi3mr_enable_logdata() argument
159 if (job->request_payload.payload_len >= sizeof(logdata_enable)) { in mpi3mr_enable_logdata()
160 sg_copy_from_buffer(job->request_payload.sg_list, in mpi3mr_enable_logdata()
161 job->request_payload.sg_cnt, in mpi3mr_enable_logdata()
171 * @job: BSG job pointer
178 struct bsg_job *job) in mpi3mr_get_logdata() argument
182 if ((!mrioc->logdata_buf) || (job->request_payload.payload_len < entry_sz)) in mpi3mr_get_logdata()
185 num_entries = job->request_payload.payload_len / entry_sz; in mpi3mr_get_logdata()
190 if (job->request_payload.payload_len >= sz) { in mpi3mr_get_logdata()
[all …]
