Lines Matching full:if — io_uring/net.c
20 #if defined(CONFIG_NET)
74 if (unlikely(sqe->off || sqe->addr || sqe->rw_flags || in io_shutdown_prep()
88 if (issue_flags & IO_URING_F_NONBLOCK) in io_shutdown()
92 if (unlikely(!sock)) in io_shutdown()
102 if (!(flags & MSG_WAITALL)) in io_net_retry()
111 if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED) in io_netmsg_recycle()
114 /* Let normal cleanup path reap it if we fail adding to the cache */ in io_netmsg_recycle()
115 if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) { in io_netmsg_recycle()
128 if (!(issue_flags & IO_URING_F_UNLOCKED) && in io_msg_alloc_async()
137 if (!io_alloc_async_data(req)) { in io_msg_alloc_async()
157 if (req_has_async_data(req)) in io_setup_async_msg()
160 if (!async_msg) { in io_setup_async_msg()
166 if (async_msg->msg.msg_name) in io_setup_async_msg()
168 /* if we're using fast_iov, set it to the new one */ in io_setup_async_msg()
169 if (!kmsg->free_iov) { in io_setup_async_msg()
194 if (!zc->addr || req_has_async_data(req)) in io_send_prep_async()
197 if (!io) in io_send_prep_async()
210 if (!sr->addr || req_has_async_data(req)) in io_setup_async_addr()
213 if (!io) in io_setup_async_addr()
223 if (!io_msg_alloc_async_prep(req)) in io_sendmsg_prep_async()
226 if (!ret) in io_sendmsg_prep_async()
242 if (req->opcode == IORING_OP_SEND) { in io_sendmsg_prep()
243 if (READ_ONCE(sqe->__pad3[0])) in io_sendmsg_prep()
247 } else if (sqe->addr2 || sqe->file_index) { in io_sendmsg_prep()
254 if (sr->flags & ~IORING_RECVSEND_POLL_FIRST) in io_sendmsg_prep()
257 if (sr->msg_flags & MSG_DONTWAIT) in io_sendmsg_prep()
261 if (req->ctx->compat) in io_sendmsg_prep()
278 if (unlikely(!sock)) in io_sendmsg()
281 if (req_has_async_data(req)) { in io_sendmsg()
285 if (ret) in io_sendmsg()
290 if (!(req->flags & REQ_F_POLLED) && in io_sendmsg()
295 if (issue_flags & IO_URING_F_NONBLOCK) in io_sendmsg()
297 if (flags & MSG_WAITALL) in io_sendmsg()
302 if (ret < min_ret) { in io_sendmsg()
303 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK)) in io_sendmsg()
305 if (ret > 0 && io_net_retry(sock, flags)) { in io_sendmsg()
310 if (ret == -ERESTARTSYS) in io_sendmsg()
315 if (kmsg->free_iov) in io_sendmsg()
319 if (ret >= 0) in io_sendmsg()
321 else if (sr->done_io) in io_sendmsg()
344 if (sr->addr) { in io_send()
345 if (req_has_async_data(req)) { in io_send()
351 if (unlikely(ret < 0)) in io_send()
358 if (!(req->flags & REQ_F_POLLED) && in io_send()
363 if (unlikely(!sock)) in io_send()
367 if (unlikely(ret)) in io_send()
371 if (issue_flags & IO_URING_F_NONBLOCK) in io_send()
373 if (flags & MSG_WAITALL) in io_send()
378 if (ret < min_ret) { in io_send()
379 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK)) in io_send()
382 if (ret > 0 && io_net_retry(sock, flags)) { in io_send()
389 if (ret == -ERESTARTSYS) in io_send()
393 if (ret >= 0) in io_send()
395 else if (sr->done_io) in io_send()
405 if (iomsg->namelen < 0) in io_recvmsg_multishot_overflow()
407 if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out), in io_recvmsg_multishot_overflow()
410 if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr)) in io_recvmsg_multishot_overflow()
423 if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg))) in __io_recvmsg_copy_hdr()
427 if (ret) in __io_recvmsg_copy_hdr()
430 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_recvmsg_copy_hdr()
431 if (msg.msg_iovlen == 0) { in __io_recvmsg_copy_hdr()
435 } else if (msg.msg_iovlen > 1) { in __io_recvmsg_copy_hdr()
438 if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov))) in __io_recvmsg_copy_hdr()
444 if (req->flags & REQ_F_APOLL_MULTISHOT) { in __io_recvmsg_copy_hdr()
447 if (io_recvmsg_multishot_overflow(iomsg)) in __io_recvmsg_copy_hdr()
455 if (ret > 0) in __io_recvmsg_copy_hdr()
471 if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg))) in __io_compat_recvmsg_copy_hdr()
475 if (ret) in __io_compat_recvmsg_copy_hdr()
479 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_compat_recvmsg_copy_hdr()
482 if (msg.msg_iovlen == 0) { in __io_compat_recvmsg_copy_hdr()
484 } else if (msg.msg_iovlen > 1) { in __io_compat_recvmsg_copy_hdr()
487 if (!access_ok(uiov, sizeof(*uiov))) in __io_compat_recvmsg_copy_hdr()
489 if (__get_user(clen, &uiov->iov_len)) in __io_compat_recvmsg_copy_hdr()
491 if (clen < 0) in __io_compat_recvmsg_copy_hdr()
496 if (req->flags & REQ_F_APOLL_MULTISHOT) { in __io_compat_recvmsg_copy_hdr()
499 if (io_recvmsg_multishot_overflow(iomsg)) in __io_compat_recvmsg_copy_hdr()
507 if (ret < 0) in __io_compat_recvmsg_copy_hdr()
521 if (req->ctx->compat) in io_recvmsg_copy_hdr()
532 if (!io_msg_alloc_async_prep(req)) in io_recvmsg_prep_async()
535 if (!ret) in io_recvmsg_prep_async()
546 if (unlikely(sqe->file_index || sqe->addr2)) in io_recvmsg_prep()
552 if (sr->flags & ~(RECVMSG_FLAGS)) in io_recvmsg_prep()
555 if (sr->msg_flags & MSG_DONTWAIT) in io_recvmsg_prep()
557 if (sr->msg_flags & MSG_ERRQUEUE) in io_recvmsg_prep()
559 if (sr->flags & IORING_RECV_MULTISHOT) { in io_recvmsg_prep()
560 if (!(req->flags & REQ_F_BUFFER_SELECT)) in io_recvmsg_prep()
562 if (sr->msg_flags & MSG_WAITALL) in io_recvmsg_prep()
564 if (req->opcode == IORING_OP_RECV && sr->len) in io_recvmsg_prep()
570 if (req->ctx->compat) in io_recvmsg_prep()
588 * Returns true if it is actually finished, or false if it should run
595 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) { in io_recv_finish()
601 if (!mshot_finished) { in io_recv_finish()
602 if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret, in io_recv_finish()
616 if (issue_flags & IO_URING_F_MULTISHOT) in io_recv_finish()
632 if (*len < hdr) in io_recvmsg_prep_multishot()
635 if (kmsg->controllen) { in io_recvmsg_prep_multishot()
661 if (kmsg->namelen) in io_recvmsg_multishot()
666 if (sock->file->f_flags & O_NONBLOCK) in io_recvmsg_multishot()
671 if (err < 0) in io_recvmsg_multishot()
680 if (err > kmsg->payloadlen) in io_recvmsg_multishot()
684 if (kmsg->msg.msg_namelen > kmsg->namelen) in io_recvmsg_multishot()
698 if (copy_to_user(io->buf, &hdr, copy_len)) { in io_recvmsg_multishot()
719 if (unlikely(!sock)) in io_recvmsg()
722 if (req_has_async_data(req)) { in io_recvmsg()
726 if (ret) in io_recvmsg()
731 if (!(req->flags & REQ_F_POLLED) && in io_recvmsg()
736 if (io_do_buffer_select(req)) { in io_recvmsg()
741 if (!buf) in io_recvmsg()
744 if (req->flags & REQ_F_APOLL_MULTISHOT) { in io_recvmsg()
746 if (ret) { in io_recvmsg()
759 if (force_nonblock) in io_recvmsg()
761 if (flags & MSG_WAITALL) in io_recvmsg()
765 if (req->flags & REQ_F_APOLL_MULTISHOT) in io_recvmsg()
772 if (ret < min_ret) { in io_recvmsg()
773 if (ret == -EAGAIN && force_nonblock) { in io_recvmsg()
775 if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) { in io_recvmsg()
781 if (ret > 0 && io_net_retry(sock, flags)) { in io_recvmsg()
786 if (ret == -ERESTARTSYS) in io_recvmsg()
789 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { in io_recvmsg()
793 if (ret > 0) in io_recvmsg()
795 else if (sr->done_io) in io_recvmsg()
801 if (kmsg->msg.msg_inq) in io_recvmsg()
804 if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags)) in io_recvmsg()
807 if (mshot_finished) { in io_recvmsg()
810 if (kmsg->free_iov) in io_recvmsg()
830 if (!(req->flags & REQ_F_POLLED) && in io_recv()
835 if (unlikely(!sock)) in io_recv()
839 if (io_do_buffer_select(req)) { in io_recv()
843 if (!buf) in io_recv()
849 if (unlikely(ret)) in io_recv()
862 if (force_nonblock) in io_recv()
864 if (flags & MSG_WAITALL) in io_recv()
868 if (ret < min_ret) { in io_recv()
869 if (ret == -EAGAIN && force_nonblock) { in io_recv()
870 if (issue_flags & IO_URING_F_MULTISHOT) { in io_recv()
877 if (ret > 0 && io_net_retry(sock, flags)) { in io_recv()
884 if (ret == -ERESTARTSYS) in io_recv()
887 } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { in io_recv()
892 if (ret > 0) in io_recv()
894 else if (sr->done_io) in io_recv()
900 if (msg.msg_inq) in io_recv()
903 if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags)) in io_recv()
914 if (req_has_async_data(req)) { in io_send_zc_cleanup()
916 /* might be ->fast_iov if *msg_copy_hdr failed */ in io_send_zc_cleanup()
917 if (io->free_iov != io->fast_iov) in io_send_zc_cleanup()
920 if (zc->notif) { in io_send_zc_cleanup()
932 if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3))) in io_send_zc_prep()
935 if (req->flags & REQ_F_CQE_SKIP) in io_send_zc_prep()
939 if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST | in io_send_zc_prep()
943 if (!notif) in io_send_zc_prep()
949 if (zc->flags & IORING_RECVSEND_FIXED_BUF) { in io_send_zc_prep()
952 if (unlikely(idx >= ctx->nr_user_bufs)) in io_send_zc_prep()
959 if (req->opcode == IORING_OP_SEND_ZC) { in io_send_zc_prep()
960 if (READ_ONCE(sqe->__pad3[0])) in io_send_zc_prep()
965 if (unlikely(sqe->addr2 || sqe->file_index)) in io_send_zc_prep()
967 if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF)) in io_send_zc_prep()
974 if (zc->msg_flags & MSG_DONTWAIT) in io_send_zc_prep()
980 if (req->ctx->compat) in io_send_zc_prep()
1003 if (!frag) in io_sg_from_iter()
1005 else if (unlikely(!skb_zcopy_managed(skb))) in io_sg_from_iter()
1021 if (bi.bi_size) in io_sg_from_iter()
1034 if (sk && sk->sk_type == SOCK_STREAM) { in io_sg_from_iter()
1036 if (!skb_zcopy_pure(skb)) in io_sg_from_iter()
1055 if (unlikely(!sock)) in io_send_zc()
1057 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags)) in io_send_zc()
1065 if (zc->addr) { in io_send_zc()
1066 if (req_has_async_data(req)) { in io_send_zc()
1072 if (unlikely(ret < 0)) in io_send_zc()
1079 if (!(req->flags & REQ_F_POLLED) && in io_send_zc()
1083 if (zc->flags & IORING_RECVSEND_FIXED_BUF) { in io_send_zc()
1086 if (unlikely(ret)) in io_send_zc()
1092 if (unlikely(ret)) in io_send_zc()
1095 if (unlikely(ret)) in io_send_zc()
1101 if (issue_flags & IO_URING_F_NONBLOCK) in io_send_zc()
1103 if (msg_flags & MSG_WAITALL) in io_send_zc()
1110 if (unlikely(ret < min_ret)) { in io_send_zc()
1111 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK)) in io_send_zc()
1114 if (ret > 0 && io_net_retry(sock, msg.msg_flags)) { in io_send_zc()
1121 if (ret == -ERESTARTSYS) in io_send_zc()
1126 if (ret >= 0) in io_send_zc()
1128 else if (zc->done_io) in io_send_zc()
1132 * If we're in io-wq we can't rely on tw ordering guarantees, defer in io_send_zc()
1135 if (!(issue_flags & IO_URING_F_UNLOCKED)) { in io_send_zc()
1152 if (unlikely(!sock)) in io_sendmsg_zc()
1154 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags)) in io_sendmsg_zc()
1157 if (req_has_async_data(req)) { in io_sendmsg_zc()
1161 if (ret) in io_sendmsg_zc()
1166 if (!(req->flags & REQ_F_POLLED) && in io_sendmsg_zc()
1171 if (issue_flags & IO_URING_F_NONBLOCK) in io_sendmsg_zc()
1173 if (flags & MSG_WAITALL) in io_sendmsg_zc()
1180 if (unlikely(ret < min_ret)) { in io_sendmsg_zc()
1181 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK)) in io_sendmsg_zc()
1184 if (ret > 0 && io_net_retry(sock, flags)) { in io_sendmsg_zc()
1189 if (ret == -ERESTARTSYS) in io_sendmsg_zc()
1194 if (kmsg->free_iov) { in io_sendmsg_zc()
1200 if (ret >= 0) in io_sendmsg_zc()
1202 else if (sr->done_io) in io_sendmsg_zc()
1206 * If we're in io-wq we can't rely on tw ordering guarantees, defer in io_sendmsg_zc()
1209 if (!(issue_flags & IO_URING_F_UNLOCKED)) { in io_sendmsg_zc()
1221 if (req->flags & REQ_F_PARTIAL_IO) in io_sendrecv_fail()
1224 if ((req->flags & REQ_F_NEED_CLEANUP) && in io_sendrecv_fail()
1234 if (sqe->len || sqe->buf_index) in io_accept_prep()
1242 if (flags & ~IORING_ACCEPT_MULTISHOT) in io_accept_prep()
1246 if (accept->file_slot) { in io_accept_prep()
1247 if (accept->flags & SOCK_CLOEXEC) in io_accept_prep()
1249 if (flags & IORING_ACCEPT_MULTISHOT && in io_accept_prep()
1253 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) in io_accept_prep()
1255 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK)) in io_accept_prep()
1257 if (flags & IORING_ACCEPT_MULTISHOT) in io_accept_prep()
1273 if (!fixed) { in io_accept()
1275 if (unlikely(fd < 0)) in io_accept()
1280 if (IS_ERR(file)) { in io_accept()
1281 if (!fixed) in io_accept()
1284 if (ret == -EAGAIN && force_nonblock) { in io_accept()
1286 * if it's multishot and polled, we don't need to in io_accept()
1290 if (issue_flags & IO_URING_F_MULTISHOT) in io_accept()
1294 if (ret == -ERESTARTSYS) in io_accept()
1297 } else if (!fixed) { in io_accept()
1305 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) { in io_accept()
1310 if (ret >= 0 && in io_accept()
1322 if (sqe->addr || sqe->rw_flags || sqe->buf_index) in io_socket_prep()
1332 if (sock->file_slot && (sock->flags & SOCK_CLOEXEC)) in io_socket_prep()
1334 if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) in io_socket_prep()
1346 if (!fixed) { in io_socket()
1348 if (unlikely(fd < 0)) in io_socket()
1352 if (IS_ERR(file)) { in io_socket()
1353 if (!fixed) in io_socket()
1356 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK)) in io_socket()
1358 if (ret == -ERESTARTSYS) in io_socket()
1361 } else if (!fixed) { in io_socket()
1384 if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in) in io_connect_prep()
1401 if (connect->in_progress) { in io_connect()
1406 if (socket) in io_connect()
1411 if (req_has_async_data(req)) { in io_connect()
1417 if (ret) in io_connect()
1426 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) { in io_connect()
1427 if (ret == -EINPROGRESS) { in io_connect()
1430 if (req_has_async_data(req)) in io_connect()
1432 if (io_alloc_async_data(req)) { in io_connect()
1440 if (ret == -ERESTARTSYS) in io_connect()
1443 if (ret < 0) in io_connect()
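
The same control flow recurs throughout the listing, in io_sendmsg(), io_send(), io_recvmsg(), io_recv() and the zerocopy variants: -EAGAIN is bounced back to the caller when IO_URING_F_NONBLOCK is set, -ERESTARTSYS is rewritten to -EINTR, and partial progress is banked in sr->done_io so a retry resumes instead of restarting. Below is a minimal userspace analogue of that pattern over plain send(2); send_all() and its accounting are illustrative, not kernel API.

    /* Sketch of the retry pattern the listing shows: treat EAGAIN as
     * "try again later", EINTR as restartable, and carry partial
     * progress across calls (the kernel's sr->done_io). */
    #include <errno.h>
    #include <sys/types.h>
    #include <sys/socket.h>

    /* Returns total bytes sent, or -errno on a hard failure. */
    static ssize_t send_all(int fd, const void *buf, size_t len, int flags)
    {
            size_t done_io = 0;             /* mirrors sr->done_io */

            while (done_io < len) {
                    ssize_t ret = send(fd, (const char *)buf + done_io,
                                       len - done_io, flags);
                    if (ret > 0) {
                            done_io += ret; /* partial send: retry the rest */
                            continue;
                    }
                    if (ret < 0 && errno == EINTR)
                            continue;       /* kernel maps -ERESTARTSYS to -EINTR */
                    if (ret < 0 && errno == EAGAIN)
                            return done_io ? (ssize_t)done_io : -EAGAIN;
                    return ret < 0 ? -errno : (ssize_t)done_io;
            }
            return (ssize_t)done_io;
    }

The io_net_retry() lines capture the same idea from the kernel side: without MSG_WAITALL a short transfer completes as-is, with it the request is retried until the full length has moved.

The accept and recv entries also surface the multishot machinery (IORING_ACCEPT_MULTISHOT, REQ_F_APOLL_MULTISHOT, io_post_aux_cqe()), where one submission keeps producing completions. From userspace this is normally driven through liburing; a hedged sketch of multishot accept, assuming a liburing recent enough to provide io_uring_prep_multishot_accept(), re-arming whenever a CQE arrives without IORING_CQE_F_MORE (error handling trimmed for brevity):

    #include <liburing.h>
    #include <unistd.h>

    static void serve(int listen_fd)
    {
            struct io_uring ring;
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;

            io_uring_queue_init(8, &ring, 0);
            sqe = io_uring_get_sqe(&ring);
            io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
            io_uring_submit(&ring);

            for (;;) {
                    if (io_uring_wait_cqe(&ring, &cqe))
                            break;
                    if (cqe->res >= 0)
                            close(cqe->res); /* accepted fd; real code would use it */
                    if (!(cqe->flags & IORING_CQE_F_MORE)) {
                            /* multishot request ended; arm a fresh one */
                            sqe = io_uring_get_sqe(&ring);
                            io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
                            io_uring_submit(&ring);
                    }
                    io_uring_cqe_seen(&ring, cqe);
            }
            io_uring_queue_exit(&ring);
    }

Each accepted connection arrives as its own CQE from the single SQE; the REQ_F_APOLL_MULTISHOT checks in the listing are the kernel side of keeping that request armed.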