Lines Matching refs:sqe
269 const struct io_uring_sqe *sqe; member
489 if (req->submit.sqe) { in io_queue_async_work()
490 switch (req->submit.sqe->opcode) { in io_queue_async_work()
1078 const struct io_uring_sqe *sqe = s->sqe; in io_prep_rw() local
1099 kiocb->ki_pos = READ_ONCE(sqe->off); in io_prep_rw()
1103 ioprio = READ_ONCE(sqe->ioprio); in io_prep_rw()
1113 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags)); in io_prep_rw()
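
The io_prep_rw() lines above pull the file offset, the I/O priority and the RWF_* flags out of the SQE with READ_ONCE(). For context, a minimal submission-side sketch of the matching fill for a vectored read; the field names come from the io_uring UAPI header, while the helper itself is hypothetical:

#include <string.h>
#include <sys/uio.h>
#include <linux/io_uring.h>

/* Illustrative helper: fill one SQE for IORING_OP_READV. The kernel
 * side reads off, ioprio and rw_flags exactly as io_prep_rw() does
 * in the lines above. */
static void fill_readv_sqe(struct io_uring_sqe *sqe, int fd,
                           const struct iovec *iov, unsigned nr_vecs,
                           __u64 offset)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode    = IORING_OP_READV;
        sqe->fd        = fd;
        sqe->addr      = (unsigned long) iov;  /* iovec array */
        sqe->len       = nr_vecs;              /* number of iovecs */
        sqe->off       = offset;               /* becomes kiocb->ki_pos */
        sqe->ioprio    = 0;                    /* default priority */
        sqe->rw_flags  = 0;                    /* e.g. RWF_NOWAIT */
        sqe->user_data = 0x1;                  /* echoed back in the CQE */
}
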
1163 const struct io_uring_sqe *sqe, in io_import_fixed() argument
1166 size_t len = READ_ONCE(sqe->len); in io_import_fixed()
1176 buf_index = READ_ONCE(sqe->buf_index); in io_import_fixed()
1182 buf_addr = READ_ONCE(sqe->addr); in io_import_fixed()
1240 const struct io_uring_sqe *sqe = s->sqe; in io_import_iovec() local
1241 void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_import_iovec()
1242 size_t sqe_len = READ_ONCE(sqe->len); in io_import_iovec()
1253 opcode = READ_ONCE(sqe->opcode); in io_import_iovec()
1256 ssize_t ret = io_import_fixed(ctx, rw, sqe, iter); in io_import_iovec()
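
io_import_fixed() resolves a pre-registered buffer from sqe->buf_index and checks sqe->addr/sqe->len against it; io_import_iovec() only takes that path for the *_FIXED opcodes it reads at line 1253, and otherwise imports a normal iovec array. A hedged userspace sketch of the registered-buffer path, assuming the usual liburing helpers (these are liburing names, not names from the file listed here):

#include <liburing.h>
#include <sys/uio.h>

/* Sketch: register one buffer, then read into it with
 * IORING_OP_READ_FIXED so the kernel resolves it through
 * sqe->buf_index as in io_import_fixed() above. */
int read_fixed_example(int fd, void *buf, unsigned len)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        int ret;

        ret = io_uring_queue_init(8, &ring, 0);
        if (ret < 0)
                return ret;

        /* Registration is what later makes sqe->buf_index meaningful. */
        ret = io_uring_register_buffers(&ring, &iov, 1);
        if (ret < 0)
                goto out;

        sqe = io_uring_get_sqe(&ring);
        /* buf_index 0 selects the iovec registered above. */
        io_uring_prep_read_fixed(sqe, fd, buf, len, 0, 0);
        io_uring_submit(&ring);

        ret = io_uring_wait_cqe(&ring, &cqe);
        if (ret == 0) {
                ret = cqe->res;        /* bytes read, or -errno */
                io_uring_cqe_seen(&ring, cqe);
        }
out:
        io_uring_queue_exit(&ring);
        return ret;
}
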
1533 static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_fsync() argument
1542 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index)) in io_prep_fsync()
1548 static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_fsync() argument
1551 loff_t sqe_off = READ_ONCE(sqe->off); in io_fsync()
1552 loff_t sqe_len = READ_ONCE(sqe->len); in io_fsync()
1557 fsync_flags = READ_ONCE(sqe->fsync_flags); in io_fsync()
1561 ret = io_prep_fsync(req, sqe); in io_fsync()
1575 io_cqring_add_event(req->ctx, sqe->user_data, ret); in io_fsync()
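
io_prep_fsync() rejects any SQE whose addr, ioprio or buf_index is non-zero, and io_fsync() then reads off, len and fsync_flags. A minimal fill sketch (UAPI field names; the helper is hypothetical):

#include <string.h>
#include <linux/io_uring.h>

/* Illustrative: unused fields must stay zero or io_prep_fsync()
 * fails the request with -EINVAL. */
static void fill_fsync_sqe(struct io_uring_sqe *sqe, int fd, int datasync)
{
        memset(sqe, 0, sizeof(*sqe));          /* addr/ioprio/buf_index == 0 */
        sqe->opcode      = IORING_OP_FSYNC;
        sqe->fd          = fd;
        sqe->off         = 0;                  /* start of the flushed range */
        sqe->len         = 0;                  /* with off 0, 0 means "to end of file" */
        sqe->fsync_flags = datasync ? IORING_FSYNC_DATASYNC : 0;
        sqe->user_data   = 0x2;
}
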
1580 static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_sfr() argument
1590 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index)) in io_prep_sfr()
1597 const struct io_uring_sqe *sqe, in io_sync_file_range() argument
1605 ret = io_prep_sfr(req, sqe); in io_sync_file_range()
1613 sqe_off = READ_ONCE(sqe->off); in io_sync_file_range()
1614 sqe_len = READ_ONCE(sqe->len); in io_sync_file_range()
1615 flags = READ_ONCE(sqe->sync_range_flags); in io_sync_file_range()
1621 io_cqring_add_event(req->ctx, sqe->user_data, ret); in io_sync_file_range()
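
io_sync_file_range() follows the same shape: io_prep_sfr() demands the unrelated fields be zero, and the handler takes its range from off/len and its SYNC_FILE_RANGE_* flags from sync_range_flags. A sketch, under the same assumptions as above:

#define _GNU_SOURCE
#include <string.h>
#include <fcntl.h>              /* SYNC_FILE_RANGE_WRITE */
#include <linux/io_uring.h>

/* Illustrative fill for IORING_OP_SYNC_FILE_RANGE; the flag values
 * are the same ones sync_file_range(2) takes. */
static void fill_sfr_sqe(struct io_uring_sqe *sqe, int fd,
                         __u64 off, __u32 len)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode           = IORING_OP_SYNC_FILE_RANGE;
        sqe->fd               = fd;
        sqe->off              = off;
        sqe->len              = len;
        sqe->sync_range_flags = SYNC_FILE_RANGE_WRITE;
        sqe->user_data        = 0x3;
}
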
1627 static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_send_recvmsg() argument
1643 flags = READ_ONCE(sqe->msg_flags); in io_send_recvmsg()
1650 READ_ONCE(sqe->addr); in io_send_recvmsg()
1657 io_cqring_add_event(req->ctx, sqe->user_data, ret); in io_send_recvmsg()
1663 static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_sendmsg() argument
1667 return io_send_recvmsg(req, sqe, force_nonblock, __sys_sendmsg_sock); in io_sendmsg()
1673 static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_recvmsg() argument
1677 return io_send_recvmsg(req, sqe, force_nonblock, __sys_recvmsg_sock); in io_recvmsg()
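
For the SENDMSG/RECVMSG opcodes, io_send_recvmsg() reads the MSG_* flags from msg_flags, treats addr as a pointer to the caller's msghdr (line 1650 above is the tail of that pointer load), and hands off to __sys_sendmsg_sock() or __sys_recvmsg_sock(). A short liburing sketch of the submission side (liburing helper names, assumed):

#include <liburing.h>
#include <sys/socket.h>

/* Sketch: queue a sendmsg on a connected socket. The kernel reads
 * msg_flags and the msghdr pointer from the SQE as shown above. */
static void queue_sendmsg(struct io_uring *ring, int sock,
                          struct msghdr *msg)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_sendmsg(sqe, sock, msg, MSG_NOSIGNAL);
        io_uring_sqe_set_data(sqe, msg);   /* becomes cqe->user_data */
}
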
1714 static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_remove() argument
1722 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index || in io_poll_remove()
1723 sqe->poll_events) in io_poll_remove()
1728 if (READ_ONCE(sqe->addr) == poll_req->user_data) { in io_poll_remove()
1736 io_cqring_add_event(req->ctx, sqe->user_data, ret); in io_poll_remove()
1832 static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_add() argument
1843 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index) in io_poll_add()
1848 req->submit.sqe = NULL; in io_poll_add()
1850 events = READ_ONCE(sqe->poll_events); in io_poll_add()
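
io_poll_add() takes its mask from poll_events and insists the unrelated fields stay zero; io_poll_remove() uses addr to name the user_data of an earlier poll request, which it matches at line 1728. A sketch of the pair (UAPI fields; helper names hypothetical):

#include <string.h>
#include <poll.h>
#include <linux/io_uring.h>

/* Arm a one-shot poll on fd. */
static void fill_poll_add_sqe(struct io_uring_sqe *sqe, int fd, __u64 tag)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode      = IORING_OP_POLL_ADD;
        sqe->fd          = fd;
        sqe->poll_events = POLLIN;
        sqe->user_data   = tag;
}

/* Cancel it: addr carries the user_data of the poll request above,
 * which io_poll_remove() compares at line 1728. */
static void fill_poll_remove_sqe(struct io_uring_sqe *sqe, __u64 tag)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode    = IORING_OP_POLL_REMOVE;
        sqe->fd        = -1;        /* no file needed for the removal */
        sqe->addr      = tag;
        sqe->user_data = tag + 1;   /* completion for the cancel itself */
}
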
1932 static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_timeout() argument
1942 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags || in io_timeout()
1943 sqe->len != 1) in io_timeout()
1946 if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr))) in io_timeout()
1956 count = READ_ONCE(sqe->off); in io_timeout()
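
io_timeout() is stricter still: flags, ioprio, buf_index and timeout_flags must be zero and len must be exactly 1, while addr points at a struct __kernel_timespec and off carries a completion count. Sketch:

#include <string.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>   /* struct __kernel_timespec */

/* Illustrative: a relative timeout that completes after either `count`
 * other completions or `ts` elapses, whichever comes first. io_timeout()
 * above insists that len == 1 and that flags, ioprio, buf_index and
 * timeout_flags are all zero. */
static void fill_timeout_sqe(struct io_uring_sqe *sqe,
                             struct __kernel_timespec *ts, __u32 count)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode    = IORING_OP_TIMEOUT;
        sqe->fd        = -1;                   /* timeouts take no file */
        sqe->addr      = (unsigned long) ts;   /* copied via get_timespec64() */
        sqe->len       = 1;
        sqe->off       = count;
        sqe->user_data = 0x4;
}
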
2019 const struct io_uring_sqe *sqe) in io_req_defer() argument
2037 memcpy(sqe_copy, sqe, sizeof(*sqe_copy)); in io_req_defer()
2038 req->submit.sqe = sqe_copy; in io_req_defer()
2051 req->user_data = READ_ONCE(s->sqe->user_data); in __io_submit_sqe()
2056 opcode = READ_ONCE(s->sqe->opcode); in __io_submit_sqe()
2062 if (unlikely(s->sqe->buf_index)) in __io_submit_sqe()
2067 if (unlikely(s->sqe->buf_index)) in __io_submit_sqe()
2078 ret = io_fsync(req, s->sqe, force_nonblock); in __io_submit_sqe()
2081 ret = io_poll_add(req, s->sqe); in __io_submit_sqe()
2084 ret = io_poll_remove(req, s->sqe); in __io_submit_sqe()
2087 ret = io_sync_file_range(req, s->sqe, force_nonblock); in __io_submit_sqe()
2090 ret = io_sendmsg(req, s->sqe, force_nonblock); in __io_submit_sqe()
2093 ret = io_recvmsg(req, s->sqe, force_nonblock); in __io_submit_sqe()
2096 ret = io_timeout(req, s->sqe); in __io_submit_sqe()
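
__io_submit_sqe() is the dispatch point: it stashes sqe->user_data in the request at line 2051, switches on the opcode and calls the handlers listed above, each of which eventually posts a CQE carrying that same user_data. A liburing round-trip sketch of that contract (liburing names, not kernel ones):

#include <liburing.h>
#include <stdio.h>

/* Sketch: the user_data stored in the SQE (line 2051 above) comes
 * back verbatim in the matching CQE. */
int nop_roundtrip(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;

        if (io_uring_queue_init(4, &ring, 0) < 0)
                return -1;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_nop(sqe);
        sqe->user_data = 0xdeadbeef;

        io_uring_submit(&ring);
        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                printf("user_data %#llx res %d\n",
                       (unsigned long long) cqe->user_data, cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
        return 0;
}
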
2122 const struct io_uring_sqe *sqe) in io_async_list_from_sqe() argument
2124 switch (sqe->opcode) { in io_async_list_from_sqe()
2136 static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe) in io_sqe_needs_user() argument
2138 u8 opcode = READ_ONCE(sqe->opcode); in io_sqe_needs_user()
2154 async_list = io_async_list_from_sqe(ctx, req->submit.sqe); in io_sq_wq_submit_work()
2158 const struct io_uring_sqe *sqe = s->sqe; in io_sq_wq_submit_work() local
2165 if (io_sqe_needs_user(sqe) && !cur_mm) { in io_sq_wq_submit_work()
2197 io_cqring_add_event(ctx, sqe->user_data, ret); in io_sq_wq_submit_work()
2202 kfree(sqe); in io_sq_wq_submit_work()
2294 static bool io_op_needs_file(const struct io_uring_sqe *sqe) in io_op_needs_file() argument
2296 int op = READ_ONCE(sqe->opcode); in io_op_needs_file()
2314 flags = READ_ONCE(s->sqe->flags); in io_req_set_file()
2315 fd = READ_ONCE(s->sqe->fd); in io_req_set_file()
2326 if (!io_op_needs_file(s->sqe)) in io_req_set_file()
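
io_req_set_file() reads flags and fd at lines 2314-2315: normally fd is a plain descriptor, but with IOSQE_FIXED_FILE it is an index into the table registered via IORING_REGISTER_FILES, and io_op_needs_file() skips the lookup for opcodes that take no file at all. A liburing sketch of the fixed-file path (helper and flag names assumed from liburing and the UAPI):

#include <liburing.h>
#include <sys/uio.h>

/* Sketch: with IOSQE_FIXED_FILE set, sqe->fd is an index into the
 * registered file table rather than a process file descriptor. */
static int queue_fixed_read(struct io_uring *ring, int fd,
                            struct iovec *iov)
{
        struct io_uring_sqe *sqe;
        int ret;

        /* One-time setup in real code; done inline here for brevity. */
        ret = io_uring_register_files(ring, &fd, 1);
        if (ret < 0)
                return ret;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_readv(sqe, 0 /* index into the table, not an fd */,
                            iov, 1, 0);
        sqe->flags |= IOSQE_FIXED_FILE;   /* io_req_set_file() uses the table */
        return io_uring_submit(ring);
}
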
2361 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL); in __io_queue_sqe()
2365 s->sqe = sqe_copy; in __io_queue_sqe()
2367 list = io_async_list_from_sqe(ctx, s->sqe); in __io_queue_sqe()
2402 ret = io_req_defer(ctx, req, s->sqe); in io_queue_sqe()
2406 io_cqring_add_event(ctx, s->sqe->user_data, ret); in io_queue_sqe()
2429 ret = io_req_defer(ctx, req, s->sqe); in io_queue_link_head()
2434 io_cqring_add_event(ctx, s->sqe->user_data, ret); in io_queue_link_head()
2466 if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) { in io_submit_sqe()
2482 io_cqring_add_event(ctx, s->sqe->user_data, ret); in io_submit_sqe()
2486 req->user_data = s->sqe->user_data; in io_submit_sqe()
2498 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL); in io_submit_sqe()
2504 s->sqe = sqe_copy; in io_submit_sqe()
2507 } else if (s->sqe->flags & IOSQE_IO_LINK) { in io_submit_sqe()
2586 s->sqe = &ctx->sq_sqes[head]; in io_get_sqring()
2628 prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0; in io_submit_sqes()
2630 if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) { in io_submit_sqes()
2643 io_cqring_add_event(ctx, s.sqe->user_data, in io_submit_sqes()
2819 prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0; in io_ring_submit()
2821 if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) { in io_ring_submit()
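
The submission loops (io_submit_sqes() and io_ring_submit()) check two flags before queueing each SQE: IOSQE_IO_LINK chains the following SQE behind the current one, and IOSQE_IO_DRAIN, handled through io_req_defer(), holds a request back until everything ahead of it has completed; anything outside SQE_VALID_FLAGS is rejected at line 2466. A closing liburing sketch of a write linked to an fsync (liburing names, assumed):

#include <liburing.h>
#include <sys/uio.h>

/* Sketch: IOSQE_IO_LINK makes the fsync wait for the write to finish;
 * if the write fails, the linked fsync is failed as well. */
static int queue_write_then_fsync(struct io_uring *ring, int fd,
                                  const struct iovec *iov)
{
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_writev(sqe, fd, iov, 1, 0);
        sqe->flags |= IOSQE_IO_LINK;      /* chain to the next SQE */
        sqe->user_data = 1;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_fsync(sqe, fd, 0);
        sqe->user_data = 2;

        return io_uring_submit(ring);     /* two CQEs, completed in order */
}
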