Home
last modified time | relevance | path

Searched refs:submit (Results 1 – 25 of 158) sorted by relevance

1234567

/Linux-v5.4/drivers/gpu/drm/etnaviv/
etnaviv_gem_submit.c:34 struct etnaviv_gem_submit *submit; in submit_create() local
35 size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit)); in submit_create()
37 submit = kzalloc(sz, GFP_KERNEL); in submit_create()
38 if (!submit) in submit_create()
41 submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request), in submit_create()
43 if (!submit->pmrs) { in submit_create()
44 kfree(submit); in submit_create()
47 submit->nr_pmrs = nr_pmrs; in submit_create()
49 submit->gpu = gpu; in submit_create()
50 kref_init(&submit->refcount); in submit_create()
[all …]
etnaviv_sched.c:24 struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); in etnaviv_sched_dependency() local
28 if (unlikely(submit->in_fence)) { in etnaviv_sched_dependency()
29 fence = submit->in_fence; in etnaviv_sched_dependency()
30 submit->in_fence = NULL; in etnaviv_sched_dependency()
38 for (i = 0; i < submit->nr_bos; i++) { in etnaviv_sched_dependency()
39 struct etnaviv_gem_submit_bo *bo = &submit->bos[i]; in etnaviv_sched_dependency()
74 struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); in etnaviv_sched_run_job() local
78 fence = etnaviv_gpu_submit(submit); in etnaviv_sched_run_job()
80 dev_dbg(submit->gpu->dev, "skipping bad job\n"); in etnaviv_sched_run_job()
87 struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); in etnaviv_sched_timedout_job() local
[all …]
etnaviv_dump.c:113 void etnaviv_core_dump(struct etnaviv_gem_submit *submit) in etnaviv_core_dump() argument
115 struct etnaviv_gpu *gpu = submit->gpu; in etnaviv_core_dump()
137 mmu_size + gpu->buffer.size + submit->cmdbuf.size; in etnaviv_core_dump()
140 for (i = 0; i < submit->nr_bos; i++) { in etnaviv_core_dump()
141 obj = submit->bos[i].obj; in etnaviv_core_dump()
179 submit->cmdbuf.vaddr, submit->cmdbuf.size, in etnaviv_core_dump()
180 etnaviv_cmdbuf_get_va(&submit->cmdbuf, in etnaviv_core_dump()
196 for (i = 0; i < submit->nr_bos; i++) { in etnaviv_core_dump()
201 obj = submit->bos[i].obj; in etnaviv_core_dump()
202 vram = submit->bos[i].mapping; in etnaviv_core_dump()
etnaviv_gpu.c:1206 const struct etnaviv_gem_submit *submit = event->submit; in sync_point_perfmon_sample() local
1209 for (i = 0; i < submit->nr_pmrs; i++) { in sync_point_perfmon_sample()
1210 const struct etnaviv_perfmon_request *pmr = submit->pmrs + i; in sync_point_perfmon_sample()
1213 etnaviv_perfmon_process(gpu, pmr, submit->exec_state); in sync_point_perfmon_sample()
1238 const struct etnaviv_gem_submit *submit = event->submit; in sync_point_perfmon_sample_post() local
1244 for (i = 0; i < submit->nr_pmrs; i++) { in sync_point_perfmon_sample_post()
1245 const struct etnaviv_perfmon_request *pmr = submit->pmrs + i; in sync_point_perfmon_sample_post()
1263 struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit) in etnaviv_gpu_submit() argument
1265 struct etnaviv_gpu *gpu = submit->gpu; in etnaviv_gpu_submit()
1270 if (!submit->runtime_resumed) { in etnaviv_gpu_submit()
[all …]
/Linux-v5.4/drivers/gpu/drm/msm/
msm_gem_submit.c:32 struct msm_gem_submit *submit; in submit_create() local
33 uint64_t sz = struct_size(submit, bos, nr_bos) + in submit_create()
34 ((u64)nr_cmds * sizeof(submit->cmd[0])); in submit_create()
39 submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); in submit_create()
40 if (!submit) in submit_create()
43 submit->dev = dev; in submit_create()
44 submit->aspace = aspace; in submit_create()
45 submit->gpu = gpu; in submit_create()
46 submit->fence = NULL; in submit_create()
47 submit->cmd = (void *)&submit->bos[nr_bos]; in submit_create()
[all …]
msm_rd.c:86 struct msm_gem_submit *submit; member
300 struct msm_gem_submit *submit, int idx, in snapshot_buf() argument
303 struct msm_gem_object *obj = submit->bos[idx].obj; in snapshot_buf()
308 offset = iova - submit->bos[idx].iova; in snapshot_buf()
310 iova = submit->bos[idx].iova; in snapshot_buf()
322 if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ)) in snapshot_buf()
337 should_dump(struct msm_gem_submit *submit, int idx) in should_dump() argument
339 return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP); in should_dump()
343 void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, in msm_rd_dump_submit() argument
346 struct drm_device *dev = submit->dev; in msm_rd_dump_submit()
[all …]
msm_gpu.c:336 struct msm_gem_submit *submit, char *comm, char *cmd) in msm_gpu_crashstate_capture() argument
356 if (submit) { in msm_gpu_crashstate_capture()
359 state->bos = kcalloc(submit->nr_cmds, in msm_gpu_crashstate_capture()
362 for (i = 0; state->bos && i < submit->nr_cmds; i++) { in msm_gpu_crashstate_capture()
363 int idx = submit->cmd[i].idx; in msm_gpu_crashstate_capture()
365 msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj, in msm_gpu_crashstate_capture()
366 submit->bos[idx].iova, submit->bos[idx].flags); in msm_gpu_crashstate_capture()
379 struct msm_gem_submit *submit, char *comm, char *cmd) in msm_gpu_crashstate_capture() argument
391 struct msm_gem_submit *submit; in update_fences() local
393 list_for_each_entry(submit, &ring->submits, node) { in update_fences()
[all …]
msm_gpu_trace.h:34 TP_PROTO(struct msm_gem_submit *submit, u64 ticks),
35 TP_ARGS(submit, ticks),
44 __entry->pid = pid_nr(submit->pid);
45 __entry->id = submit->ident;
46 __entry->ringid = submit->ring->id;
47 __entry->seqno = submit->seqno;
57 TP_PROTO(struct msm_gem_submit *submit, u64 elapsed, u64 clock,
59 TP_ARGS(submit, elapsed, clock, start, end),
71 __entry->pid = pid_nr(submit->pid);
72 __entry->id = submit->ident;
[all …]
/Linux-v5.4/crypto/async_tx/
async_raid6_recov.c:19 size_t len, struct async_submit_ctl *submit) in async_sum_product() argument
21 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, in async_sum_product()
38 if (submit->flags & ASYNC_TX_FENCE) in async_sum_product()
54 async_tx_submit(chan, tx, submit); in async_sum_product()
66 async_tx_quiesce(&submit->depend_tx); in async_sum_product()
84 struct async_submit_ctl *submit) in async_mult() argument
86 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, in async_mult()
102 if (submit->flags & ASYNC_TX_FENCE) in async_mult()
121 async_tx_submit(chan, tx, submit); in async_mult()
134 async_tx_quiesce(&submit->depend_tx); in async_mult()
[all …]
async_xor.c:24 struct async_submit_ctl *submit) in do_async_xor() argument
28 dma_async_tx_callback cb_fn_orig = submit->cb_fn; in do_async_xor()
29 void *cb_param_orig = submit->cb_param; in do_async_xor()
30 enum async_tx_flags flags_orig = submit->flags; in do_async_xor()
40 submit->flags = flags_orig; in do_async_xor()
46 submit->flags &= ~ASYNC_TX_ACK; in do_async_xor()
47 submit->flags |= ASYNC_TX_FENCE; in do_async_xor()
48 submit->cb_fn = NULL; in do_async_xor()
49 submit->cb_param = NULL; in do_async_xor()
51 submit->cb_fn = cb_fn_orig; in do_async_xor()
[all …]
async_pq.c:39 struct async_submit_ctl *submit) in do_async_gen_syndrome() argument
43 enum async_tx_flags flags_orig = submit->flags; in do_async_gen_syndrome()
44 dma_async_tx_callback cb_fn_orig = submit->cb_fn; in do_async_gen_syndrome()
45 dma_async_tx_callback cb_param_orig = submit->cb_param; in do_async_gen_syndrome()
52 submit->flags = flags_orig; in do_async_gen_syndrome()
59 submit->flags &= ~ASYNC_TX_ACK; in do_async_gen_syndrome()
60 submit->flags |= ASYNC_TX_FENCE; in do_async_gen_syndrome()
61 submit->cb_fn = NULL; in do_async_gen_syndrome()
62 submit->cb_param = NULL; in do_async_gen_syndrome()
64 submit->cb_fn = cb_fn_orig; in do_async_gen_syndrome()
[all …]
raid6test.c:57 struct async_submit_ctl submit; in raid6_dual_recov() local
68 init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); in raid6_dual_recov()
69 tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit); in raid6_dual_recov()
87 init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, in raid6_dual_recov()
89 tx = async_xor(dest, blocks, 0, count, bytes, &submit); in raid6_dual_recov()
91 init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv); in raid6_dual_recov()
92 tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit); in raid6_dual_recov()
97 init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); in raid6_dual_recov()
98 tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit); in raid6_dual_recov()
101 init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); in raid6_dual_recov()
[all …]
async_tx.c:43 __async_tx_find_channel(struct async_submit_ctl *submit, in __async_tx_find_channel() argument
46 struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; in __async_tx_find_channel()
144 struct async_submit_ctl *submit) in async_tx_submit() argument
146 struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; in async_tx_submit()
148 tx->callback = submit->cb_fn; in async_tx_submit()
149 tx->callback_param = submit->cb_param; in async_tx_submit()
204 if (submit->flags & ASYNC_TX_ACK) in async_tx_submit()
221 async_trigger_callback(struct async_submit_ctl *submit) in async_trigger_callback() argument
226 struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; in async_trigger_callback()
245 async_tx_submit(chan, tx, submit); in async_trigger_callback()
[all …]
async_memcpy.c:34 struct async_submit_ctl *submit) in async_memcpy() argument
36 struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY, in async_memcpy()
48 if (submit->cb_fn) in async_memcpy()
50 if (submit->flags & ASYNC_TX_FENCE) in async_memcpy()
70 async_tx_submit(chan, tx, submit); in async_memcpy()
76 async_tx_quiesce(&submit->depend_tx); in async_memcpy()
86 async_tx_sync_epilog(submit); in async_memcpy()
/Linux-v5.4/drivers/gpu/drm/lima/
lima_gem.c:206 static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit) in lima_gem_add_deps() argument
210 for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) { in lima_gem_add_deps()
213 if (!submit->in_sync[i]) in lima_gem_add_deps()
216 err = drm_syncobj_find_fence(file, submit->in_sync[i], in lima_gem_add_deps()
221 err = drm_gem_fence_array_add(&submit->task->deps, fence); in lima_gem_add_deps()
231 int lima_gem_submit(struct drm_file *file, struct lima_submit *submit) in lima_gem_submit() argument
239 struct lima_bo **bos = submit->lbos; in lima_gem_submit()
241 if (submit->out_sync) { in lima_gem_submit()
242 out_sync = drm_syncobj_find(file, submit->out_sync); in lima_gem_submit()
247 for (i = 0; i < submit->nr_bos; i++) { in lima_gem_submit()
[all …]
lima_drv.c:97 struct lima_submit submit = {0}; in lima_ioctl_gem_submit() local
111 bos = kvcalloc(args->nr_bos, sizeof(*submit.bos) + sizeof(*submit.lbos), GFP_KERNEL); in lima_ioctl_gem_submit()
115 size = args->nr_bos * sizeof(*submit.bos); in lima_ioctl_gem_submit()
143 submit.pipe = args->pipe; in lima_ioctl_gem_submit()
144 submit.bos = bos; in lima_ioctl_gem_submit()
145 submit.lbos = (void *)bos + size; in lima_ioctl_gem_submit()
146 submit.nr_bos = args->nr_bos; in lima_ioctl_gem_submit()
147 submit.task = task; in lima_ioctl_gem_submit()
148 submit.ctx = ctx; in lima_ioctl_gem_submit()
149 submit.flags = args->flags; in lima_ioctl_gem_submit()
[all …]
/Linux-v5.4/include/linux/
async_tx.h:103 __async_tx_find_channel(struct async_submit_ctl *submit,
118 async_tx_find_channel(struct async_submit_ctl *submit, in async_tx_find_channel() argument
133 async_tx_sync_epilog(struct async_submit_ctl *submit) in async_tx_sync_epilog() argument
135 if (submit->cb_fn) in async_tx_sync_epilog()
136 submit->cb_fn(submit->cb_param); in async_tx_sync_epilog()
159 struct async_submit_ctl *submit);
163 int src_cnt, size_t len, struct async_submit_ctl *submit);
168 struct async_submit_ctl *submit);
173 struct async_submit_ctl *submit);
175 struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
[all …]
/Linux-v5.4/drivers/dma/ti/
cppi41.c:119 u16 submit; member
157 [ 0] = { .submit = 32, .complete = 93},
158 [ 1] = { .submit = 34, .complete = 94},
159 [ 2] = { .submit = 36, .complete = 95},
160 [ 3] = { .submit = 38, .complete = 96},
161 [ 4] = { .submit = 40, .complete = 97},
162 [ 5] = { .submit = 42, .complete = 98},
163 [ 6] = { .submit = 44, .complete = 99},
164 [ 7] = { .submit = 46, .complete = 100},
165 [ 8] = { .submit = 48, .complete = 101},
[all …]
/Linux-v5.4/fs/iomap/
direct-io.c:42 } submit; member
69 dio->submit.last_queue = bdev_get_queue(iomap->bdev); in iomap_dio_submit_bio()
70 dio->submit.cookie = submit_bio(bio); in iomap_dio_submit_bio()
156 struct task_struct *waiter = dio->submit.waiter; in iomap_dio_bio_end_io()
157 WRITE_ONCE(dio->submit.waiter, NULL); in iomap_dio_bio_end_io()
203 unsigned int align = iov_iter_alignment(dio->submit.iter); in iomap_dio_bio_actor()
242 iter = *dio->submit.iter; in iomap_dio_bio_actor()
259 iov_iter_revert(dio->submit.iter, copied); in iomap_dio_bio_actor()
297 iov_iter_advance(dio->submit.iter, n); in iomap_dio_bio_actor()
327 length = iov_iter_zero(length, dio->submit.iter); in iomap_dio_hole_actor()
[all …]
/Linux-v5.4/drivers/gpu/drm/i915/selftests/
i915_active.c:80 struct i915_sw_fence *submit; in __live_active_setup() local
90 submit = heap_fence_create(GFP_KERNEL); in __live_active_setup()
91 if (!submit) { in __live_active_setup()
109 err = i915_sw_fence_await_sw_fence_gfp(&rq->submit, in __live_active_setup()
110 submit, in __live_active_setup()
135 i915_sw_fence_commit(submit); in __live_active_setup()
136 heap_fence_put(submit); in __live_active_setup()
/Linux-v5.4/drivers/gpu/drm/msm/adreno/
a5xx_gpu.c:46 static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit, in a5xx_submit_in_rb() argument
50 struct msm_ringbuffer *ring = submit->ring; in a5xx_submit_in_rb()
55 for (i = 0; i < submit->nr_cmds; i++) { in a5xx_submit_in_rb()
56 switch (submit->cmd[i].type) { in a5xx_submit_in_rb()
65 obj = submit->bos[submit->cmd[i].idx].obj; in a5xx_submit_in_rb()
66 dwords = submit->cmd[i].size; in a5xx_submit_in_rb()
102 ring->memptrs->fence = submit->seqno; in a5xx_submit_in_rb()
106 static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, in a5xx_submit() argument
112 struct msm_ringbuffer *ring = submit->ring; in a5xx_submit()
115 if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) { in a5xx_submit()
[all …]
a6xx_gpu.c:82 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, in a6xx_submit() argument
85 unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; in a6xx_submit()
89 struct msm_ringbuffer *ring = submit->ring; in a6xx_submit()
111 for (i = 0; i < submit->nr_cmds; i++) { in a6xx_submit()
112 switch (submit->cmd[i].type) { in a6xx_submit()
121 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); in a6xx_submit()
122 OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); in a6xx_submit()
123 OUT_RING(ring, submit->cmd[i].size); in a6xx_submit()
135 OUT_RING(ring, submit->seqno); in a6xx_submit()
145 OUT_RING(ring, submit->seqno); in a6xx_submit()
[all …]
/Linux-v5.4/Documentation/crypto/
async-tx-api.txt:57 async_<operation>(<op specific parameters>, struct async_submit ctl *submit)
79 operations complete. When an application needs to submit a chain of
110 async_<operation>, or when the application needs to submit a chain of
117 2/ Completion callback routines cannot submit new operations. This
142 struct async_submit_ctl submit;
146 init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL,
148 tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, &submit)
150 submit->depend_tx = tx;
151 tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, &submit);
154 init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK, tx,
[all …]
/Linux-v5.4/drivers/gpu/drm/i915/
i915_request.c:108 i915_sw_fence_fini(&rq->submit); in i915_fence_release()
135 cb->hook(container_of(cb->fence, struct i915_request, submit), in irq_execute_cb_hook()
231 GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); in i915_request_retire()
354 cb->fence = &rq->submit; in __i915_request_await_execution()
533 container_of(fence, typeof(*request), submit); in submit_notify()
693 i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify); in __i915_request_create()
796 return i915_sw_fence_await_dma_fence(&rq->submit, in i915_request_await_start()
833 return i915_sw_fence_await_dma_fence(&to->submit, in emit_semaphore_wait()
895 ret = i915_sw_fence_await_sw_fence_gfp(&to->submit, in i915_request_await_request()
896 &from->submit, in i915_request_await_request()
[all …]
/Linux-v5.4/tools/io_uring/
queue.c:83 goto submit; in io_uring_submit()
125 submit: in io_uring_submit()

1234567