/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS	MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS	(MAX_MSG_FRAGS + 1)

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};

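/*
 * Scatterlist ring describing an sk_msg's payload: start/curr/end are
 * indices into data[] and wrap at NR_MSG_FRAG_IDS, size is the total byte
 * count, and the copy bitmap marks elements that must not be accessed
 * directly (see sk_msg_compute_data_pointers()).
 */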
struct sk_msg_sg {
	u32 start;
	u32 curr;
	u32 end;
	u32 size;
	u32 copybreak;
	DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
	/* The extra two elements:
	 * 1) used for chaining the front and rear sections when the list
	 *    becomes partitioned (e.g. end < start). The crypto APIs require
	 *    the chaining;
	 * 2) to chain tailer SG entries after the message.
	 */
	struct scatterlist data[MAX_MSG_FRAGS + 2];
};

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg sg;
	void *data;
	void *data_end;
	u32 apply_bytes;
	u32 cork_bytes;
	u32 flags;
	struct sk_buff *skb;
	struct sock *sk_redir;
	struct sock *sk;
	struct list_head list;
};

struct sk_psock_progs {
	struct bpf_prog *msg_parser;
	struct bpf_prog *stream_parser;
	struct bpf_prog *stream_verdict;
	struct bpf_prog *skb_verdict;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
	SK_PSOCK_RX_STRP_ENABLED,
};

struct sk_psock_link {
	struct list_head list;
	struct bpf_map *map;
	void *link_raw;
};

struct sk_psock_work_state {
	u32 len;
	u32 off;
};

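/*
 * Per-socket psock state. The saved_* callbacks keep the socket's original
 * handlers so they can still be invoked or restored later, e.g. via
 * sk_psock_data_ready() and sk_psock_restore_proto() below.
 */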
struct sk_psock {
	struct sock *sk;
	struct sock *sk_redir;
	u32 apply_bytes;
	u32 cork_bytes;
	u32 eval;
	bool redir_ingress; /* undefined if sk_redir is null */
	struct sk_msg *cork;
	struct sk_psock_progs progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	struct strparser strp;
#endif
	struct sk_buff_head ingress_skb;
	struct list_head ingress_msg;
	spinlock_t ingress_lock;
	unsigned long state;
	struct list_head link;
	spinlock_t link_lock;
	refcount_t refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_destroy)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	void (*saved_data_ready)(struct sock *sk);
	int (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
				    bool restore);
	struct proto *sk_proto;
	struct mutex work_mutex;
	struct sk_psock_work_state work_state;
	struct delayed_work work;
	struct rcu_work rwork;
};

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags);
bool sk_msg_is_readable(struct sock *sk);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}

static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}

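/*
 * Number of ring elements between start and end, accounting for wrap-around
 * at NR_MSG_FRAG_IDS. For example, assuming NR_MSG_FRAG_IDS == 18 (the
 * common MAX_SKB_FRAGS == 17 case), sk_msg_iter_dist(16, 2) covers elements
 * 16, 17, 0, 1 and returns 4.
 */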
static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
	return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}

#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = NR_MSG_FRAG_IDS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == NR_MSG_FRAG_IDS)		\
			var = 0;			\
	} while (0)

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)

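/*
 * Zero the message and initialize the scatterlist end markers. The
 * BUILD_BUG_ON checks that data[] holds NR_MSG_FRAG_IDS entries plus the one
 * extra slot reserved for chaining (see struct sk_msg_sg).
 */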
static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}

static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length = size;
	dst->sg.size += size;
	src->sg.size -= size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

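/*
 * Point msg->data/msg->data_end at the first in-use element so it can be
 * read directly. If that element is flagged in the copy bitmap, direct
 * access is not permitted and both pointers are left NULL.
 */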
static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (test_bit(msg->sg.start, msg->sg.copy)) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}

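/*
 * Append a page fragment at sg.end: takes a reference on the page, marks the
 * element in the copy bitmap, accounts the length in sg.size and advances
 * the end index.
 */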
static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	get_page(page);
	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	__set_bit(msg->sg.end, msg->sg.copy);
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}

static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		if (copy_state)
			__set_bit(i, msg->sg.copy);
		else
			__clear_bit(i, msg->sg.copy);
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}

static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return __rcu_dereference_sk_user_data_with_flags(sk,
							 SK_USER_DATA_PSOCK);
}

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}

static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}

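/*
 * Queue a message on the psock ingress list under ingress_lock. If the psock
 * is no longer accepting traffic (SK_PSOCK_TX_ENABLED cleared), the message
 * is freed instead of being queued.
 */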
static inline void sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
		list_add_tail(&msg->list, &psock->ingress_msg);
	else {
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
					       struct sk_msg *msg)
{
	struct sk_msg *ret;

	spin_lock_bh(&psock->ingress_lock);
	if (list_is_last(&msg->list, &psock->ingress_msg))
		ret = NULL;
	else
		ret = list_next_entry(msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return ret;
}

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void kfree_sk_msg(struct sk_msg *msg)
{
	if (msg->skb)
		consume_skb(msg->skb);
	kfree(msg);
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock);

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
#else
static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	return -EOPNOTSUPP;
}

static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
}

static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
}
#endif

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);

static inline struct sk_psock_link *sk_psock_init_link(void)
{
	return kzalloc(sizeof(struct sk_psock_link),
		       GFP_ATOMIC | __GFP_NOWARN);
}

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	if (psock->psock_update_sk_prot)
		psock->psock_update_sk_prot(sk, psock, true);
}

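/*
 * Reference counting for the psock itself. A minimal caller-side sketch
 * (illustration only, not part of this header):
 *
 *	struct sk_psock *psock = sk_psock_get(sk);
 *
 *	if (psock) {
 *		// ... use psock ...
 *		sk_psock_put(sk, psock);
 *	}
 *
 * sk_psock_get() returns NULL if no psock is attached or its refcount has
 * already hit zero; sk_psock_put() drops the reference and tears the psock
 * down via sk_psock_drop() on the final put.
 */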
static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}

static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		psock->saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
}

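/*
 * BPF program slot helpers: psock_set_prog() unconditionally installs @prog
 * and drops the reference on whatever was installed before, while
 * psock_replace_prog() only succeeds if the slot still holds @old and
 * returns -ENOENT otherwise.
 */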
static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}

static inline int psock_replace_prog(struct bpf_prog **pprog,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	if (cmpxchg(pprog, old, prog) != old)
		return -ENOENT;

	if (old)
		bpf_prog_put(old);

	return 0;
}

static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->stream_parser, NULL);
	psock_set_prog(&progs->stream_verdict, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);

static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
	if (!psock)
		return false;
	return !!psock->saved_data_ready;
}

static inline bool sk_is_udp(const struct sock *sk)
{
	return sk->sk_type == SOCK_DGRAM &&
	       sk->sk_protocol == IPPROTO_UDP;
}

#if IS_ENABLED(CONFIG_NET_SOCK_MSG)

#define BPF_F_STRPARSER	(1UL << 1)

/* We only have two bits so far. */
#define BPF_F_PTR_MASK	~(BPF_F_INGRESS | BPF_F_STRPARSER)

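/*
 * skb->_sk_redir carries the redirect socket pointer with BPF_F_INGRESS and
 * BPF_F_STRPARSER folded into its low bits; the helpers below set, test and
 * strip those tag bits, and BPF_F_PTR_MASK recovers the plain pointer.
 */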
static inline bool skb_bpf_strparser(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_STRPARSER;
}

static inline void skb_bpf_set_strparser(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_STRPARSER;
}

static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_INGRESS;
}

static inline void skb_bpf_set_ingress(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_INGRESS;
}

static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
				     bool ingress)
{
	skb->_sk_redir = (unsigned long)sk_redir;
	if (ingress)
		skb->_sk_redir |= BPF_F_INGRESS;
}

static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
}

static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
	skb->_sk_redir = 0;
}
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */