#ifndef LIB_URING_H
#define LIB_URING_H

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/uio.h>
#include <signal.h>
#include <string.h>
#include "../../include/uapi/linux/io_uring.h"
#include <inttypes.h>
#include "barrier.h"

/*
 * Library interface to io_uring
 */
struct io_uring_sq {
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *kflags;
	unsigned *kdropped;
	unsigned *array;
	struct io_uring_sqe *sqes;

	unsigned sqe_head;
	unsigned sqe_tail;

	size_t ring_sz;
};

struct io_uring_cq {
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *koverflow;
	struct io_uring_cqe *cqes;

	size_t ring_sz;
};

struct io_uring {
	struct io_uring_sq sq;
	struct io_uring_cq cq;
	int ring_fd;
};

/*
 * System calls
 */
extern int io_uring_setup(unsigned entries, struct io_uring_params *p);
extern int io_uring_enter(int fd, unsigned to_submit,
	unsigned min_complete, unsigned flags, sigset_t *sig);
extern int io_uring_register(int fd, unsigned int opcode, void *arg,
	unsigned int nr_args);
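
/*
 * Example (sketch): the raw system call wrappers can be used without the
 * library helpers below. A minimal setup might look like the following;
 * mmap'ing the rings is omitted here and error handling is abbreviated.
 *
 *	struct io_uring_params p;
 *	int fd;
 *
 *	memset(&p, 0, sizeof(p));
 *	fd = io_uring_setup(8, &p);
 *	if (fd < 0)
 *		return fd;
 *	... mmap sq/cq rings and sqe array using p's offsets ...
 *	io_uring_enter(fd, to_submit, 0, 0, NULL);
 */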

/*
 * Library interface
 */
extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
	unsigned flags);
extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
	struct io_uring *ring);
extern void io_uring_queue_exit(struct io_uring *ring);
extern int io_uring_peek_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
extern int io_uring_wait_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
extern int io_uring_submit(struct io_uring *ring);
extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
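
/*
 * Example (sketch): the typical life cycle when using the library
 * helpers is init -> get sqe -> prep -> submit -> reap -> exit. Error
 * handling is abbreviated for brevity.
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *
 *	if (io_uring_queue_init(8, &ring, 0) < 0)
 *		return -1;
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_nop(sqe);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	io_uring_cqe_seen(&ring, cqe);
 *	io_uring_queue_exit(&ring);
 */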

/*
 * Must be called once the application has processed a cqe returned by
 * io_uring_{peek,wait}_cqe(), so the kernel can reuse that cqe slot.
 */
static inline void io_uring_cqe_seen(struct io_uring *ring,
				     struct io_uring_cqe *cqe)
{
	if (cqe) {
		struct io_uring_cq *cq = &ring->cq;

		(*cq->khead)++;
		/*
		 * Ensure that the kernel sees our new head; the kernel has
		 * the matching read barrier.
		 */
		write_barrier();
	}
}
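
/*
 * Example (sketch): reaping a completion. io_uring_peek_cqe() does not
 * block, io_uring_wait_cqe() does; either way the cqe must be marked
 * seen once processed, or the ring head will not advance. Note that a
 * successful peek may still yield a NULL cqe when the ring is empty,
 * which io_uring_cqe_seen() tolerates.
 *
 *	struct io_uring_cqe *cqe;
 *
 *	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
 *		if (cqe->res < 0)
 *			fprintf(stderr, "IO failed: %d\n", cqe->res);
 *		io_uring_cqe_seen(&ring, cqe);
 *	}
 */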

/*
 * Command prep helpers
 */
static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
{
	sqe->user_data = (unsigned long) data;
}

static inline void *io_uring_cqe_get_data(struct io_uring_cqe *cqe)
{
	return (void *) (uintptr_t) cqe->user_data;
}
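
/*
 * Example (sketch): user_data round-trips unchanged from submission to
 * completion, so a per-request context pointer can be attached to the
 * sqe and recovered from the matching cqe. 'struct my_request' is a
 * hypothetical application type.
 *
 *	struct my_request *req = malloc(sizeof(*req));
 *
 *	io_uring_sqe_set_data(sqe, req);
 *	... submit, then on completion ...
 *	struct my_request *done = io_uring_cqe_get_data(cqe);
 */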

static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
				    const void *addr, unsigned len,
				    off_t offset)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = op;
	sqe->fd = fd;
	sqe->off = offset;
	sqe->addr = (unsigned long) addr;
	sqe->len = len;
}

static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
				       const struct iovec *iovecs,
				       unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
}
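
/*
 * Example (sketch): preparing a vectored read of one buffer at offset 0.
 *
 *	char buf[4096];
 *	struct iovec iov = {
 *		.iov_base = buf,
 *		.iov_len = sizeof(buf),
 *	};
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
 *	io_uring_submit(&ring);
 */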

static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
					    void *buf, unsigned nbytes,
					    off_t offset)
{
	io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
}

static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
					const struct iovec *iovecs,
					unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
}

static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
					     const void *buf, unsigned nbytes,
					     off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
}
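
/*
 * Example (sketch): the *_fixed variants operate on buffers previously
 * registered with the kernel via io_uring_register() and
 * IORING_REGISTER_BUFFERS, avoiding per-I/O mapping costs. The sqe's
 * buf_index field selects which registered buffer to use; the prep
 * helper zeroes it, so the caller sets it afterwards if needed.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *
 *	io_uring_register(ring.ring_fd, IORING_REGISTER_BUFFERS, &iov, 1);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read_fixed(sqe, fd, buf, 4096, 0);
 *	sqe->buf_index = 0;
 */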

static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
					  short poll_mask)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_ADD;
	sqe->fd = fd;
	sqe->poll_events = poll_mask;
}
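
/*
 * Example (sketch): a poll request completes once the fd becomes ready,
 * with the triggered event mask reported in cqe->res. poll_mask takes
 * the usual poll(2) event bits from <poll.h>, e.g. POLLIN. A pending
 * poll can later be cancelled with io_uring_prep_poll_remove(), passing
 * the user_data that identified the original request.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_add(sqe, sockfd, POLLIN);
 */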

static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
					     void *user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_REMOVE;
	sqe->addr = (unsigned long) user_data;
}

static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
				       unsigned fsync_flags)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FSYNC;
	sqe->fd = fd;
	sqe->fsync_flags = fsync_flags;
}
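
/*
 * Example (sketch): fsync_flags is 0 for fsync(2)-like behavior, or
 * IORING_FSYNC_DATASYNC for fdatasync(2) semantics.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 */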

static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_NOP;
}

#ifdef __cplusplus
}
#endif

#endif