Searched refs:sqe (Results 1 – 25 of 84) sorted by relevance


/Linux-v6.1/tools/io_uring/
liburing.h
98 static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data) in io_uring_sqe_set_data() argument
100 sqe->user_data = (unsigned long) data; in io_uring_sqe_set_data()
108 static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd, in io_uring_prep_rw() argument
112 memset(sqe, 0, sizeof(*sqe)); in io_uring_prep_rw()
113 sqe->opcode = op; in io_uring_prep_rw()
114 sqe->fd = fd; in io_uring_prep_rw()
115 sqe->off = offset; in io_uring_prep_rw()
116 sqe->addr = (unsigned long) addr; in io_uring_prep_rw()
117 sqe->len = len; in io_uring_prep_rw()
120 static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd, in io_uring_prep_readv() argument
[all …]
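
A minimal usage sketch of how the helpers above compose, assuming the mini liburing from tools/io_uring/ and an already-initialized ring (queue_one_read and tag are illustrative names, not part of the source):

#include <sys/uio.h>
#include "liburing.h"   /* the mini copy shown above */

static int queue_one_read(struct io_uring *ring, int fd,
                          struct iovec *iov, off_t off, void *tag)
{
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);              /* NULL when the SQ ring is full */
        if (!sqe)
                return -1;
        io_uring_prep_readv(sqe, fd, iov, 1, off); /* zeroes, then fills the SQE */
        io_uring_sqe_set_data(sqe, tag);           /* returned later in cqe->user_data */
        return io_uring_submit(ring);
}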
io_uring-cp.c
71 struct io_uring_sqe *sqe; in queue_prepped() local
73 sqe = io_uring_get_sqe(ring); in queue_prepped()
74 assert(sqe); in queue_prepped()
77 io_uring_prep_readv(sqe, infd, &data->iov, 1, data->offset); in queue_prepped()
79 io_uring_prep_writev(sqe, outfd, &data->iov, 1, data->offset); in queue_prepped()
81 io_uring_sqe_set_data(sqe, data); in queue_prepped()
86 struct io_uring_sqe *sqe; in queue_read() local
93 sqe = io_uring_get_sqe(ring); in queue_read()
94 if (!sqe) { in queue_read()
106 io_uring_prep_readv(sqe, infd, &data->iov, 1, offset); in queue_read()
[all …]
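
The !sqe branch above guards against a full submission queue; one common recovery, sketched under the assumption that io_uring_submit() frees up SQ slots (get_sqe_or_flush is an illustrative name):

static struct io_uring_sqe *get_sqe_or_flush(struct io_uring *ring)
{
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        if (!sqe) {
                io_uring_submit(ring);        /* flush pending entries */
                sqe = io_uring_get_sqe(ring); /* may still be NULL */
        }
        return sqe;
}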
io_uring-bench.c
145 struct io_uring_sqe *sqe = &s->sqes[index]; in init_io() local
151 sqe->opcode = IORING_OP_NOP; in init_io()
172 sqe->flags = IOSQE_FIXED_FILE; in init_io()
173 sqe->fd = f->fixed_fd; in init_io()
175 sqe->flags = 0; in init_io()
176 sqe->fd = f->real_fd; in init_io()
179 sqe->opcode = IORING_OP_READ_FIXED; in init_io()
180 sqe->addr = (unsigned long) s->iovecs[index].iov_base; in init_io()
181 sqe->len = BS; in init_io()
182 sqe->buf_index = index; in init_io()
[all …]
/Linux-v6.1/io_uring/
fs.c
50 int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_renameat_prep() argument
55 if (sqe->buf_index || sqe->splice_fd_in) in io_renameat_prep()
60 ren->old_dfd = READ_ONCE(sqe->fd); in io_renameat_prep()
61 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_renameat_prep()
62 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in io_renameat_prep()
63 ren->new_dfd = READ_ONCE(sqe->len); in io_renameat_prep()
64 ren->flags = READ_ONCE(sqe->rename_flags); in io_renameat_prep()
104 int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_unlinkat_prep() argument
109 if (sqe->off || sqe->len || sqe->buf_index || sqe->splice_fd_in) in io_unlinkat_prep()
114 un->dfd = READ_ONCE(sqe->fd); in io_unlinkat_prep()
[all …]
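
For reference, a sketch of the userspace fill that io_renameat_prep() above decodes, mapping each field to the READ_ONCE() it feeds (raw SQE layout per <linux/io_uring.h>; prep_renameat_raw is an illustrative helper, and real programs would normally use liburing's prep functions instead):

#include <string.h>
#include <linux/io_uring.h>

static void prep_renameat_raw(struct io_uring_sqe *sqe,
                              int old_dfd, const char *oldpath,
                              int new_dfd, const char *newpath,
                              unsigned int flags)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_RENAMEAT;
        sqe->fd = old_dfd;                    /* -> ren->old_dfd */
        sqe->addr = (unsigned long) oldpath;  /* -> oldf */
        sqe->addr2 = (unsigned long) newpath; /* -> newf */
        sqe->len = new_dfd;                   /* -> ren->new_dfd */
        sqe->rename_flags = flags;            /* -> ren->flags */
}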
sync.c
25 int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sfr_prep() argument
29 if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in)) in io_sfr_prep()
32 sync->off = READ_ONCE(sqe->off); in io_sfr_prep()
33 sync->len = READ_ONCE(sqe->len); in io_sfr_prep()
34 sync->flags = READ_ONCE(sqe->sync_range_flags); in io_sfr_prep()
52 int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fsync_prep() argument
56 if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in)) in io_fsync_prep()
59 sync->flags = READ_ONCE(sqe->fsync_flags); in io_fsync_prep()
63 sync->off = READ_ONCE(sqe->off); in io_fsync_prep()
64 sync->len = READ_ONCE(sqe->len); in io_fsync_prep()
[all …]
advise.c
31 int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_madvise_prep() argument
36 if (sqe->buf_index || sqe->off || sqe->splice_fd_in) in io_madvise_prep()
39 ma->addr = READ_ONCE(sqe->addr); in io_madvise_prep()
40 ma->len = READ_ONCE(sqe->len); in io_madvise_prep()
41 ma->advice = READ_ONCE(sqe->fadvise_advice); in io_madvise_prep()
65 int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fadvise_prep() argument
69 if (sqe->buf_index || sqe->addr || sqe->splice_fd_in) in io_fadvise_prep()
72 fa->offset = READ_ONCE(sqe->off); in io_fadvise_prep()
73 fa->len = READ_ONCE(sqe->len); in io_fadvise_prep()
74 fa->advice = READ_ONCE(sqe->fadvise_advice); in io_fadvise_prep()
openclose.c
34 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in __io_openat_prep() argument
40 if (unlikely(sqe->buf_index)) in __io_openat_prep()
49 open->dfd = READ_ONCE(sqe->fd); in __io_openat_prep()
50 fname = u64_to_user_ptr(READ_ONCE(sqe->addr)); in __io_openat_prep()
58 open->file_slot = READ_ONCE(sqe->file_index); in __io_openat_prep()
67 int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat_prep() argument
70 u64 mode = READ_ONCE(sqe->len); in io_openat_prep()
71 u64 flags = READ_ONCE(sqe->open_flags); in io_openat_prep()
74 return __io_openat_prep(req, sqe); in io_openat_prep()
77 int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat2_prep() argument
[all …]
xattr.c
45 const struct io_uring_sqe *sqe) in __io_getxattr_prep() argument
56 name = u64_to_user_ptr(READ_ONCE(sqe->addr)); in __io_getxattr_prep()
57 ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in __io_getxattr_prep()
58 ix->ctx.size = READ_ONCE(sqe->len); in __io_getxattr_prep()
59 ix->ctx.flags = READ_ONCE(sqe->xattr_flags); in __io_getxattr_prep()
81 int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fgetxattr_prep() argument
83 return __io_getxattr_prep(req, sqe); in io_fgetxattr_prep()
86 int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_getxattr_prep() argument
92 ret = __io_getxattr_prep(req, sqe); in io_getxattr_prep()
96 path = u64_to_user_ptr(READ_ONCE(sqe->addr3)); in io_getxattr_prep()
[all …]
splice.c
27 const struct io_uring_sqe *sqe) in __io_splice_prep() argument
32 sp->len = READ_ONCE(sqe->len); in __io_splice_prep()
33 sp->flags = READ_ONCE(sqe->splice_flags); in __io_splice_prep()
36 sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in); in __io_splice_prep()
40 int io_tee_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_tee_prep() argument
42 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off)) in io_tee_prep()
44 return __io_splice_prep(req, sqe); in io_tee_prep()
79 int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_splice_prep() argument
83 sp->off_in = READ_ONCE(sqe->splice_off_in); in io_splice_prep()
84 sp->off_out = READ_ONCE(sqe->off); in io_splice_prep()
[all …]
statx.c
23 int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_statx_prep() argument
28 if (sqe->buf_index || sqe->splice_fd_in) in io_statx_prep()
33 sx->dfd = READ_ONCE(sqe->fd); in io_statx_prep()
34 sx->mask = READ_ONCE(sqe->len); in io_statx_prep()
35 path = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_statx_prep()
36 sx->buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in io_statx_prep()
37 sx->flags = READ_ONCE(sqe->statx_flags); in io_statx_prep()
epoll.c
24 int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_epoll_ctl_prep() argument
32 if (sqe->buf_index || sqe->splice_fd_in) in io_epoll_ctl_prep()
35 epoll->epfd = READ_ONCE(sqe->fd); in io_epoll_ctl_prep()
36 epoll->op = READ_ONCE(sqe->len); in io_epoll_ctl_prep()
37 epoll->fd = READ_ONCE(sqe->off); in io_epoll_ctl_prep()
42 ev = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_epoll_ctl_prep()
net.h
31 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
36 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
43 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
49 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
52 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
56 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
61 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
net.c
70 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_shutdown_prep() argument
74 if (unlikely(sqe->off || sqe->addr || sqe->rw_flags || in io_shutdown_prep()
75 sqe->buf_index || sqe->splice_fd_in)) in io_shutdown_prep()
78 shutdown->how = READ_ONCE(sqe->len); in io_shutdown_prep()
238 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sendmsg_prep() argument
243 if (READ_ONCE(sqe->__pad3[0])) in io_sendmsg_prep()
245 sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in io_sendmsg_prep()
246 sr->addr_len = READ_ONCE(sqe->addr_len); in io_sendmsg_prep()
247 } else if (sqe->addr2 || sqe->file_index) { in io_sendmsg_prep()
251 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_sendmsg_prep()
[all …]
msg_ring.c
126 int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_msg_ring_prep() argument
130 if (unlikely(sqe->buf_index || sqe->personality)) in io_msg_ring_prep()
133 msg->user_data = READ_ONCE(sqe->off); in io_msg_ring_prep()
134 msg->len = READ_ONCE(sqe->len); in io_msg_ring_prep()
135 msg->cmd = READ_ONCE(sqe->addr); in io_msg_ring_prep()
136 msg->src_fd = READ_ONCE(sqe->addr3); in io_msg_ring_prep()
137 msg->dst_fd = READ_ONCE(sqe->file_index); in io_msg_ring_prep()
138 msg->flags = READ_ONCE(sqe->msg_ring_flags); in io_msg_ring_prep()
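Read back from the other side, a sketch of the SQE that feeds this prep (illustrative raw fill; on success the target ring receives a CQE carrying user_data, with len posted as cqe->res):

static void prep_msg_ring_raw(struct io_uring_sqe *sqe, int target_fd,
                              unsigned int len, __u64 user_data)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_MSG_RING;
        sqe->fd = target_fd;         /* ring to signal                  */
        sqe->addr = IORING_MSG_DATA; /* -> msg->cmd                     */
        sqe->off = user_data;        /* -> msg->user_data               */
        sqe->len = len;              /* -> msg->len, posted as cqe->res */
}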
fs.h
3 int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
7 int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
11 int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
15 int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
18 int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
fdinfo.c
91 struct io_uring_sqe *sqe; in __io_uring_show_fdinfo() local
97 sqe = &ctx->sq_sqes[sq_idx << sq_shift]; in __io_uring_show_fdinfo()
101 sq_idx, io_uring_get_opcode(sqe->opcode), sqe->fd, in __io_uring_show_fdinfo()
102 sqe->flags, (unsigned long long) sqe->off, in __io_uring_show_fdinfo()
103 (unsigned long long) sqe->addr, sqe->rw_flags, in __io_uring_show_fdinfo()
104 sqe->buf_index, sqe->user_data); in __io_uring_show_fdinfo()
106 u64 *sqeb = (void *) (sqe + 1); in __io_uring_show_fdinfo()
xattr.h
5 int io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
8 int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
11 int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
14 int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
kbuf.c
273 int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_remove_buffers_prep() argument
278 if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off || in io_remove_buffers_prep()
279 sqe->splice_fd_in) in io_remove_buffers_prep()
282 tmp = READ_ONCE(sqe->fd); in io_remove_buffers_prep()
288 p->bgid = READ_ONCE(sqe->buf_group); in io_remove_buffers_prep()
319 int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_provide_buffers_prep() argument
325 if (sqe->rw_flags || sqe->splice_fd_in) in io_provide_buffers_prep()
328 tmp = READ_ONCE(sqe->fd); in io_provide_buffers_prep()
332 p->addr = READ_ONCE(sqe->addr); in io_provide_buffers_prep()
333 p->len = READ_ONCE(sqe->len); in io_provide_buffers_prep()
[all …]
/Linux-v6.1/drivers/infiniband/sw/siw/
siw_qp.c
275 wqe->sqe.flags = 0; in siw_qp_mpa_rts()
276 wqe->sqe.num_sge = 1; in siw_qp_mpa_rts()
277 wqe->sqe.sge[0].length = 0; in siw_qp_mpa_rts()
278 wqe->sqe.sge[0].laddr = 0; in siw_qp_mpa_rts()
279 wqe->sqe.sge[0].lkey = 0; in siw_qp_mpa_rts()
284 wqe->sqe.rkey = 1; in siw_qp_mpa_rts()
285 wqe->sqe.raddr = 0; in siw_qp_mpa_rts()
289 wqe->sqe.opcode = SIW_OP_WRITE; in siw_qp_mpa_rts()
293 wqe->sqe.opcode = SIW_OP_READ; in siw_qp_mpa_rts()
300 siw_read_to_orq(rreq, &wqe->sqe); in siw_qp_mpa_rts()
[all …]
siw_qp_tx.c
43 struct siw_sge *sge = &wqe->sqe.sge[0]; in siw_try_1seg()
46 if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1) in siw_try_1seg()
53 memcpy(paddr, &wqe->sqe.sge[1], bytes); in siw_try_1seg()
137 c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey); in siw_qp_prepare_tx()
139 cpu_to_be64(wqe->sqe.sge[0].laddr); in siw_qp_prepare_tx()
140 c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey); in siw_qp_prepare_tx()
141 c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr); in siw_qp_prepare_tx()
142 c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length); in siw_qp_prepare_tx()
185 c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey); in siw_qp_prepare_tx()
197 c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey); in siw_qp_prepare_tx()
[all …]
siw_verbs.c
644 struct siw_sqe *sqe) in siw_copy_inline_sgl() argument
647 void *kbuf = &sqe->sge[1]; in siw_copy_inline_sgl()
650 sqe->sge[0].laddr = (uintptr_t)kbuf; in siw_copy_inline_sgl()
651 sqe->sge[0].lkey = 0; in siw_copy_inline_sgl()
669 sqe->sge[0].length = max(bytes, 0); in siw_copy_inline_sgl()
670 sqe->num_sge = bytes > 0 ? 1 : 0; in siw_copy_inline_sgl()
679 struct siw_sqe sqe = {}; in siw_sq_flush_wr() local
683 sqe.id = wr->wr_id; in siw_sq_flush_wr()
684 sqe.opcode = wr->opcode; in siw_sq_flush_wr()
685 rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR); in siw_sq_flush_wr()
[all …]
/Linux-v6.1/drivers/crypto/hisilicon/zip/
zip_crypto.c
101 void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
102 void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
103 void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
104 void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
105 void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
106 void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
107 u32 (*get_tag)(struct hisi_zip_sqe *sqe);
108 u32 (*get_status)(struct hisi_zip_sqe *sqe);
109 u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
263 static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req) in hisi_zip_fill_addr() argument
[all …]
/Linux-v6.1/tools/testing/selftests/net/
io_uring_zerocopy_tx.c
271 static inline void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd, in io_uring_prep_send() argument
274 memset(sqe, 0, sizeof(*sqe)); in io_uring_prep_send()
275 sqe->opcode = (__u8) IORING_OP_SEND; in io_uring_prep_send()
276 sqe->fd = sockfd; in io_uring_prep_send()
277 sqe->addr = (unsigned long) buf; in io_uring_prep_send()
278 sqe->len = len; in io_uring_prep_send()
279 sqe->msg_flags = (__u32) flags; in io_uring_prep_send()
282 static inline void io_uring_prep_sendzc(struct io_uring_sqe *sqe, int sockfd, in io_uring_prep_sendzc() argument
286 io_uring_prep_send(sqe, sockfd, buf, len, flags); in io_uring_prep_sendzc()
287 sqe->opcode = (__u8) IORING_OP_SEND_ZC; in io_uring_prep_sendzc()
[all …]
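
A sketch of the zero-copy variant the second helper builds, with the completion behavior the selftest relies on noted inline (prep_send_zc_raw is an illustrative name; the fields mirror io_uring_prep_send() above):

#include <string.h>
#include <linux/io_uring.h>

static void prep_send_zc_raw(struct io_uring_sqe *sqe, int sockfd,
                             const void *buf, size_t len, int flags)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_SEND_ZC;
        sqe->fd = sockfd;
        sqe->addr = (unsigned long) buf;
        sqe->len = len;
        sqe->msg_flags = (__u32) flags;
        /* The first CQE arrives with IORING_CQE_F_MORE set; a later
         * notification CQE signals that buf may be reused. */
}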
/Linux-v6.1/drivers/net/ethernet/qlogic/qed/
qed_nvmetcp_fw_funcs.c
68 if (!task_params->sqe) in init_sqe()
71 memset(task_params->sqe, 0, sizeof(*task_params->sqe)); in init_sqe()
72 task_params->sqe->task_id = cpu_to_le16(task_params->itid); in init_sqe()
79 SET_FIELD(task_params->sqe->contlen_cdbsize, in init_sqe()
81 SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, in init_sqe()
94 SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, num_sges); in init_sqe()
95 SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, buf_size); in init_sqe()
99 SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, in init_sqe()
101 SET_FIELD(task_params->sqe->contlen_cdbsize, in init_sqe()
106 SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, in init_sqe()
[all …]
/Linux-v6.1/include/trace/events/
io_uring.h
501 TP_PROTO(const struct io_uring_sqe *sqe, struct io_kiocb *req, int error),
503 TP_ARGS(sqe, req, error),
523 __string( op_str, io_uring_get_opcode(sqe->opcode) )
529 __entry->user_data = sqe->user_data;
530 __entry->opcode = sqe->opcode;
531 __entry->flags = sqe->flags;
532 __entry->ioprio = sqe->ioprio;
533 __entry->off = sqe->off;
534 __entry->addr = sqe->addr;
535 __entry->len = sqe->len;
[all …]
