// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/io_uring.h>
#include <linux/fsnotify.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "sync.h"

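/*
 * Per-request state shared by the fsync, sync_file_range and fallocate
 * handlers, stored in the io_kiocb command area.
 */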
struct io_sync {
	struct file *file;
	loff_t len;
	loff_t off;
	int flags;
	int mode;
};

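/*
 * Prepare an IORING_OP_SYNC_FILE_RANGE request: reject SQE fields the
 * opcode does not use and copy offset, length and flags out of the SQE.
 */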
int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);

	if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	sync->off = READ_ONCE(sqe->off);
	sync->len = READ_ONCE(sqe->len);
	sync->flags = READ_ONCE(sqe->sync_range_flags);
	return 0;
}

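/*
 * Issue handler for IORING_OP_SYNC_FILE_RANGE. sync_file_range() may
 * block, so a non-blocking attempt is punted back with -EAGAIN and the
 * request is retried from io-wq context.
 */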
int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
	int ret;

	/* sync_file_range always requires a blocking context */
	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = sync_file_range(req->file, sync->off, sync->len, sync->flags);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

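/*
 * Prepare an IORING_OP_FSYNC request. IORING_FSYNC_DATASYNC is the only
 * flag accepted; offset and length select the byte range to sync.
 */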
int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);

	if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	sync->flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(sync->flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	sync->off = READ_ONCE(sqe->off);
	sync->len = READ_ONCE(sqe->len);
	return 0;
}

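/*
 * Issue handler for IORING_OP_FSYNC. An end offset of zero (offset and
 * length both zero) means the whole file, which vfs_fsync_range() takes
 * as LLONG_MAX.
 */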
int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
	loff_t end = sync->off + sync->len;
	int ret;

	/* fsync always requires a blocking context */
	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = vfs_fsync_range(req->file, sync->off, end > 0 ? end : LLONG_MAX,
			      sync->flags & IORING_FSYNC_DATASYNC);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

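/*
 * Prepare an IORING_OP_FALLOCATE request. Note the SQE field reuse: the
 * allocation length is carried in sqe->addr and the fallocate mode in
 * sqe->len.
 */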
int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);

	if (sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	sync->off = READ_ONCE(sqe->off);
	sync->len = READ_ONCE(sqe->addr);
	sync->mode = READ_ONCE(sqe->len);
	return 0;
}

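/*
 * Issue handler for IORING_OP_FALLOCATE. vfs_fallocate() may block; on
 * success a modify event is generated for fsnotify watchers.
 */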
int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
	int ret;

	/* fallocate always requires a blocking context */
	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;
	ret = vfs_fallocate(req->file, sync->mode, sync->off, sync->len);
	if (ret >= 0)
		fsnotify_modify(req->file);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}