// SPDX-License-Identifier: GPL-2.0
/*
 *	SUCS NET3:
 *
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *			udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff	:	Unconnected accept() fix.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/indirect_call_wrapper.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>

/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync,
				  void *key)
{
	/*
	 * Avoid a wakeup if event not interesting for us
	 */
	if (key && !(key_to_poll(key) & (EPOLLIN | EPOLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wait for the last received packet to be different from skb
 */
int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
				int *err, long *timeo_p,
				const struct sk_buff *skb)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (READ_ONCE(queue->prev) != skb)
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
EXPORT_SYMBOL(__skb_wait_for_more_packets);

static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (skb->peeked)
		return skb;

	/* We have to unshare an skb before modifying it. */
	if (!skb_shared(skb))
		goto done;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return ERR_PTR(-ENOMEM);

	skb->prev->next = nskb;
	skb->next->prev = nskb;
	nskb->prev = skb->prev;
	nskb->next = skb->next;

	consume_skb(skb);
	skb = nskb;

done:
	skb->peeked = 1;

	return skb;
}

struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  int *off, int *err,
					  struct sk_buff **last)
{
	bool peek_at_off = false;
	struct sk_buff *skb;
	int _off = 0;

	if (unlikely(flags & MSG_PEEK && *off >= 0)) {
		peek_at_off = true;
		_off = *off;
	}

	*last = queue->prev;
	skb_queue_walk(queue, skb) {
		if (flags & MSG_PEEK) {
			if (peek_at_off && _off >= skb->len &&
			    (_off || skb->peeked)) {
				_off -= skb->len;
				continue;
			}
			if (!skb->len) {
				skb = skb_set_peeked(skb);
				if (IS_ERR(skb)) {
					*err = PTR_ERR(skb);
					return NULL;
				}
			}
			refcount_inc(&skb->users);
		} else {
			__skb_unlink(skb, queue);
		}
		*off = _off;
		return skb;
	}
	return NULL;
}

/**
 *	__skb_try_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@queue: socket queue from which to receive
 *	@flags: MSG\_ flags
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *	@last: set to last peeked message to inform the wait function
 *	       what to look for when peeking
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX, AX.25 and Appletalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function no longer locks the socket (see the --ANK note
 *	below); the caller only needs to free a returned skb, usually
 *	by calling skb_free_datagram(). Returns NULL with @err set to
 *	-EAGAIN if no data was available, or to some other value if an
 *	error was detected.
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	* 8) Great win.)
 *	*			     --ANK (980729)
 *
 *	The order of the tests when we find no data waiting is specified
 *	quite explicitly by POSIX 1003.1g; don't change it without having
 *	the standard around please.
 */
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
					struct sk_buff_head *queue,
					unsigned int flags, int *off, int *err,
					struct sk_buff **last)
{
	struct sk_buff *skb;
	unsigned long cpu_flags;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb = __skb_try_recv_from_queue(sk, queue, flags, off, &error,
						last);
		spin_unlock_irqrestore(&queue->lock, cpu_flags);
		if (error)
			goto no_packet;
		if (skb)
			return skb;

		if (!sk_can_busy_loop(sk))
			break;

		sk_busy_loop(sk, flags & MSG_DONTWAIT);
	} while (READ_ONCE(queue->prev) != *last);

	error = -EAGAIN;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_try_recv_datagram);

struct sk_buff *__skb_recv_datagram(struct sock *sk,
				    struct sk_buff_head *sk_queue,
				    unsigned int flags, int *off, int *err)
{
	struct sk_buff *skb, *last;
	long timeo;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		skb = __skb_try_recv_datagram(sk, sk_queue, flags, off, err,
					      &last);
		if (skb)
			return skb;

		if (*err != -EAGAIN)
			break;
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, sk_queue, err,
					      &timeo, last));

	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
				  int *err)
{
	int off = 0;

	return __skb_recv_datagram(sk, &sk->sk_receive_queue, flags,
				   &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
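
/*
 * Example (illustrative sketch, not part of the original file): the
 * usual recvmsg() pattern built on skb_recv_datagram(). The surrounding
 * function and the "copied"/"len" variables are hypothetical;
 * skb_copy_datagram_msg() and skb_free_datagram() are the real helpers
 * from this file and include/linux/skbuff.h.
 *
 *	struct sk_buff *skb;
 *	int err, copied;
 *
 *	skb = skb_recv_datagram(sk, flags, &err);
 *	if (!skb)
 *		return err;	(err is -EAGAIN if nonblocking and queue empty)
 *	copied = min_t(int, len, skb->len);
 *	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 *	skb_free_datagram(sk, skb);
 *	return err ? err : copied;
 */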

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram);

void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
{
	bool slow;

	if (!skb_unref(skb)) {
		sk_peek_offset_bwd(sk, len);
		return;
	}

	slow = lock_sock_fast(sk);
	sk_peek_offset_bwd(sk, len);
	skb_orphan(skb);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	__kfree_skb(skb);
}
EXPORT_SYMBOL(__skb_free_datagram_locked);

int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
			struct sk_buff *skb, unsigned int flags,
			void (*destructor)(struct sock *sk,
					   struct sk_buff *skb))
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk_queue->lock);
		if (skb->next) {
			__skb_unlink(skb, sk_queue);
			refcount_dec(&skb->users);
			if (destructor)
				destructor(sk, skb);
			err = 0;
		}
		spin_unlock_bh(&sk_queue->lock);
	}

	atomic_inc(&sk->sk_drops);
	return err;
}
EXPORT_SYMBOL(__sk_queue_drop_skb);

/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG\_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram. The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock. Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */

int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = __sk_queue_drop_skb(sk, &sk->sk_receive_queue, skb, flags,
				      NULL);

	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
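
/*
 * Example (sketch): a protocol recvmsg() error path. If copying to user
 * space fails, the skb (still on the receive queue when MSG_PEEK was
 * used) can be dropped forcibly so the same flags-aware cleanup runs;
 * "copied" and the surrounding flow are hypothetical:
 *
 *	if (skb_copy_datagram_msg(skb, 0, msg, copied)) {
 *		skb_kill_datagram(sk, skb, flags);
 *		return -EFAULT;
 *	}
 */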

INDIRECT_CALLABLE_DECLARE(static size_t simple_copy_to_iter(const void *addr,
							     size_t bytes,
							     void *data __always_unused,
							     struct iov_iter *i));

static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
			       struct iov_iter *to, int len, bool fault_short,
			       size_t (*cb)(const void *, size_t, void *,
					    struct iov_iter *), void *data)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset, start_off = offset, n;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
				    skb->data + offset, copy, data, to);
		offset += n;
		if (n != copy)
			goto short_copy;
		if ((len -= copy) == 0)
			return 0;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			struct page *page = skb_frag_page(frag);
			u8 *vaddr = kmap(page);

			if (copy > len)
				copy = len;
			n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
					    vaddr + skb_frag_off(frag) + offset - start,
					    copy, data, to);
			kunmap(page);
			offset += n;
			if (n != copy)
				goto short_copy;
			if (!(len -= copy))
				return 0;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (__skb_datagram_iter(frag_iter, offset - start,
						to, copy, fault_short, cb, data))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

	/* This is not really a user copy fault, but rather someone
	 * gave us a bogus length on the skb. We should probably
	 * print a warning here as it may indicate a kernel bug.
	 */

fault:
	iov_iter_revert(to, offset - start_off);
	return -EFAULT;

short_copy:
	if (fault_short || iov_iter_count(to))
		goto fault;

	return 0;
}

/**
 *	skb_copy_and_hash_datagram_iter - Copy datagram to an iovec iterator
 *	    and update a hash.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 *	@hash: hash request to update
 */
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
				    struct iov_iter *to, int len,
				    struct ahash_request *hash)
{
	return __skb_datagram_iter(skb, offset, to, len, true,
				   hash_and_copy_to_iter, hash);
}
EXPORT_SYMBOL(skb_copy_and_hash_datagram_iter);

static size_t simple_copy_to_iter(const void *addr, size_t bytes,
				  void *data __always_unused, struct iov_iter *i)
{
	return copy_to_iter(addr, bytes, i);
}

/**
 *	skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 */
int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
			   struct iov_iter *to, int len)
{
	trace_skb_copy_datagram_iovec(skb, len);
	return __skb_datagram_iter(skb, offset, to, len, false,
				   simple_copy_to_iter, NULL);
}
EXPORT_SYMBOL(skb_copy_datagram_iter);
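
/*
 * Example (sketch): copying a datagram into a plain kernel buffer by
 * wrapping it in an iov_iter. "buf" and "buflen" are hypothetical, and
 * ITER_DEST is spelled READ on kernels predating the
 * ITER_DEST/ITER_SOURCE rename.
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = buflen };
 *	struct iov_iter to;
 *
 *	iov_iter_kvec(&to, ITER_DEST, &kv, 1, buflen);
 *	err = skb_copy_datagram_iter(skb, 0, &to,
 *				     min_t(int, skb->len, buflen));
 */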

/**
 *	skb_copy_datagram_from_iter - Copy a datagram from an iov_iter.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: the copy source
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 */
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from,
				int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (copy_from_iter(skb->data + offset, copy, from) != copy)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			size_t copied;

			if (copy > len)
				copy = len;
			copied = copy_page_from_iter(skb_frag_page(frag),
						     skb_frag_off(frag) + offset - start,
						     copy, from);
			if (copied != copy)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iter(frag_iter,
							offset - start,
							from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);
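
/*
 * Example (sketch): the transmit-side mirror image, filling a freshly
 * allocated skb from the iterator carried by sendmsg(); the skb is
 * assumed to already have room for "len" bytes, and the error label is
 * hypothetical:
 *
 *	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
 *	if (err)
 *		goto free_skb;
 */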

int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
			    struct sk_buff *skb, struct iov_iter *from,
			    size_t length)
{
	int frag;

	if (msg && msg->msg_ubuf && msg->sg_from_iter)
		return msg->sg_from_iter(sk, skb, from, length);

	frag = skb_shinfo(skb)->nr_frags;

	while (length && iov_iter_count(from)) {
		struct page *head, *last_head = NULL;
		struct page *pages[MAX_SKB_FRAGS];
		int refs, order, n = 0;
		size_t start;
		ssize_t copied;
		unsigned long truesize;

		if (frag == MAX_SKB_FRAGS)
			return -EMSGSIZE;

		copied = iov_iter_get_pages2(from, pages, length,
					     MAX_SKB_FRAGS - frag, &start);
		if (copied < 0)
			return -EFAULT;

		length -= copied;

		truesize = PAGE_ALIGN(copied + start);
		skb->data_len += copied;
		skb->len += copied;
		skb->truesize += truesize;
		if (sk && sk->sk_type == SOCK_STREAM) {
			sk_wmem_queued_add(sk, truesize);
			if (!skb_zcopy_pure(skb))
				sk_mem_charge(sk, truesize);
		} else {
			refcount_add(truesize, &skb->sk->sk_wmem_alloc);
		}

		head = compound_head(pages[n]);
		order = compound_order(head);

		for (refs = 0; copied != 0; start = 0) {
			int size = min_t(int, copied, PAGE_SIZE - start);

			if (pages[n] - head > (1UL << order) - 1) {
				head = compound_head(pages[n]);
				order = compound_order(head);
			}

			start += (pages[n] - head) << PAGE_SHIFT;
			copied -= size;
			n++;
			if (frag) {
				skb_frag_t *last = &skb_shinfo(skb)->frags[frag - 1];

				if (head == skb_frag_page(last) &&
				    start == skb_frag_off(last) + skb_frag_size(last)) {
					skb_frag_size_add(last, size);
					/* We combined this page, we need to release
					 * a reference. Since compound pages refcount
					 * is shared among many pages, batch the refcount
					 * adjustments to limit false sharing.
					 */
					last_head = head;
					refs++;
					continue;
				}
			}
			if (refs) {
				page_ref_sub(last_head, refs);
				refs = 0;
			}
			skb_fill_page_desc_noacc(skb, frag++, head, start, size);
		}
		if (refs)
			page_ref_sub(last_head, refs);
	}
	return 0;
}
EXPORT_SYMBOL(__zerocopy_sg_from_iter);

/**
 *	zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter
 *	@skb: buffer to copy
 *	@from: the source to copy from
 *
 *	The function will first copy up to headlen, and then pin the userspace
 *	pages and build frags through them.
 *
 *	Returns 0, -EFAULT or -EMSGSIZE.
 */
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
{
	int copy = min_t(int, skb_headlen(skb), iov_iter_count(from));

	/* copy up to skb headlen */
	if (skb_copy_datagram_from_iter(skb, 0, from, copy))
		return -EFAULT;

	return __zerocopy_sg_from_iter(NULL, NULL, skb, from, ~0U);
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
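
/*
 * Example (sketch): a MSG_ZEROCOPY-style send path pins the user pages
 * instead of copying them; on -EMSGSIZE (too many frags) a caller is
 * expected to fall back to an ordinary copy. The fallback shown here is
 * hypothetical:
 *
 *	err = zerocopy_sg_from_iter(skb, &msg->msg_iter);
 *	if (err == -EMSGSIZE)
 *		err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
 */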

/**
 *	skb_copy_and_csum_datagram - Copy datagram to an iovec iterator
 *	    and update a checksum.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 *	@csump: checksum pointer
 */
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      struct iov_iter *to, int len,
				      __wsum *csump)
{
	struct csum_state csdata = { .csum = *csump };
	int ret;

	ret = __skb_datagram_iter(skb, offset, to, len, true,
				  csum_and_copy_to_iter, &csdata);
	if (ret)
		return ret;

	*csump = csdata.csum;
	return 0;
}

/**
 *	skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@msg: destination
 *
 *	Caller _must_ check that skb will fit into this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy.
 */
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
				   int hlen, struct msghdr *msg)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	if (msg_data_left(msg) < chunk) {
		if (__skb_checksum_complete(skb))
			return -EINVAL;
		if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
					       chunk, &csum))
			goto fault;

		if (csum_fold(csum)) {
			iov_iter_revert(&msg->msg_iter, chunk);
			return -EINVAL;
		}

		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(NULL, skb);
	}
	return 0;
fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
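
/*
 * Example (sketch): how a UDP-style recvmsg() picks between the plain
 * copy and the copy-and-checksum variant, folding verification into the
 * copy when the whole datagram is consumed. The "checksum_unnecessary"
 * condition and "copied" are illustrative names:
 *
 *	if (checksum_unnecessary)
 *		err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
 *					    msg, copied);
 *	else
 *		err = skb_copy_and_csum_datagram_msg(skb,
 *						     sizeof(struct udphdr),
 *						     msg);
 */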

/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you *don't* use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
__poll_t datagram_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask;
	u8 shutdown;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (READ_ONCE(sk->sk_err) ||
	    !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	shutdown = READ_ONCE(sk->sk_shutdown);
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		int state = READ_ONCE(sk->sk_state);

		if (state == TCP_CLOSE)
			mask |= EPOLLHUP;
		/* connection hasn't started yet? */
		if (state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);
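
/*
 * Example (sketch): datagram protocols typically wire this routine
 * straight into their proto_ops; the structure below is hypothetical
 * and abbreviated:
 *
 *	static const struct proto_ops example_dgram_ops = {
 *		.family		= PF_EXAMPLE,
 *		.poll		= datagram_poll,
 *		.recvmsg	= example_recvmsg,
 *		...
 *	};
 */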