// SPDX-License-Identifier: GPL-2.0-only
#include <linux/etherdevice.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
#include <linux/skb_array.h>

#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)

#define TAP_VNET_LE 0x80000000
#define TAP_VNET_BE 0x40000000

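/*
 * Illustrative note (not from the original source): TAP_VNET_LE and
 * TAP_VNET_BE live in the high bits of q->flags, above the IFF_* bits.
 * For example, userspace running a big-endian legacy-virtio guest on a
 * little-endian host would typically leave TAP_VNET_LE clear and set
 * TAP_VNET_BE via the TUNSETVNETBE ioctl, so that tap16_to_cpu() and
 * cpu_to_tap16() below byte-swap the virtio header fields.
 */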
#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s = !!(q->flags & TAP_VNET_BE);

	if (put_user(s, sp))
		return -EFAULT;

	return 0;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s;

	if (get_user(s, sp))
		return -EFAULT;

	if (s)
		q->flags |= TAP_VNET_BE;
	else
		q->flags &= ~TAP_VNET_BE;

	return 0;
}
#else
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tap_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_LE ||
	       tap_legacy_is_little_endian(q);
}

static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(tap_is_little_endian(q), val);
}

static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
{
	return __cpu_to_virtio16(tap_is_little_endian(q), val);
}

static struct proto tap_proto = {
	.name = "tap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tap_queue),
};

#define TAP_NUM_DEVS (1U << MINORBITS)

static LIST_HEAD(major_list);

struct major_info {
	struct rcu_head rcu;
	dev_t major;
	struct idr minor_idr;
	spinlock_t minor_lock;
	const char *device_name;
	struct list_head next;
};

#define GOODCOPY_LEN 128

static const struct proto_ops tap_socket_ops;

#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)

static struct tap_dev *tap_dev_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The tap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the tap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * tap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */
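
/*
 * A minimal sketch of the reader side implied above (assumed usage,
 * mirroring what tap_get_user() does further down; not additional
 * driver logic):
 *
 *	rcu_read_lock();
 *	tap = rcu_dereference(q->tap);
 *	if (tap)
 *		dev_queue_xmit(skb);	// tap->dev stays valid here
 *	rcu_read_unlock();
 *
 * Writers publish and retract the pointer with rcu_assign_pointer()
 * and RCU_INIT_POINTER() under RTNL, then wait with synchronize_rcu()
 * before dropping the final reference (see tap_put_queue()).
 */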

static int tap_enable_queue(struct tap_dev *tap, struct file *file,
			    struct tap_queue *q)
{
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	q->queue_index = tap->numvtaps;
	q->enabled = true;

	tap->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int tap_set_queue(struct tap_dev *tap, struct file *file,
			 struct tap_queue *q)
{
	if (tap->numqueues == MAX_TAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->tap, tap);
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = tap->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &tap->queue_list);

	tap->numvtaps++;
	tap->numqueues++;

	return 0;
}

static int tap_disable_queue(struct tap_queue *q)
{
	struct tap_dev *tap;
	struct tap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	tap = rtnl_dereference(q->tap);

	if (tap) {
		int index = q->queue_index;

		BUG_ON(index >= tap->numvtaps);
		nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(tap->taps[index], nq);
		RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL);
		q->enabled = false;

		tap->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Using the spinlock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void tap_put_queue(struct tap_queue *q)
{
	struct tap_dev *tap;

	rtnl_lock();
	tap = rtnl_dereference(q->tap);

	if (tap) {
		if (q->enabled)
			BUG_ON(tap_disable_queue(q));

		tap->numqueues--;
		RCU_INIT_POINTER(q->tap, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the skb's flow hash. If the packet has no
 * hash, fall back to the rx queue of the device on which it arrived.
 * If that fails too, use the first available queue.
 * Cache tap->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct tap_queue *tap_get_queue(struct tap_dev *tap,
				       struct sk_buff *skb)
{
	struct tap_queue *queue = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = READ_ONCE(tap->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	if (numvtaps == 1)
		goto single;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		queue = rcu_dereference(tap->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		queue = rcu_dereference(tap->taps[rxq]);
		goto out;
	}

single:
	queue = rcu_dereference(tap->taps[0]);
out:
	return queue;
}
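
/*
 * Worked example (illustrative, not extra logic): with numvtaps == 4
 * and skb_get_hash() returning 0x12345677, the packet is steered to
 * taps[0x12345677 % 4] == taps[3]. A packet with no flow hash that was
 * recorded on rx queue 6 wraps around to taps[6 - 4] == taps[2].
 */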

/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
void tap_del_queues(struct tap_dev *tap)
{
	struct tap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->tap, NULL);
		if (q->enabled)
			tap->numvtaps--;
		tap->numqueues--;
		sock_put(&q->sk);
	}
	BUG_ON(tap->numvtaps);
	BUG_ON(tap->numqueues);
	/* guarantee that any future tap_set_queue will fail */
	tap->numvtaps = MAX_TAP_QUEUES;
}
EXPORT_SYMBOL_GPL(tap_del_queues);

rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct tap_dev *tap;
	struct tap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	tap = tap_dev_get_rcu(dev);
	if (!tap)
		return RX_HANDLER_PASS;

	q = tap_get_queue(tap, skb);
	if (!q)
		return RX_HANDLER_PASS;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= tap->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);
		struct sk_buff *next;

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			if (ptr_ring_produce(&q->ring, skb))
				goto drop;
			goto wake_up;
		}

		consume_skb(skb);
		skb_list_walk_safe(segs, skb, next) {
			skb_mark_not_on_list(skb);
			if (ptr_ring_produce(&q->ring, skb)) {
				kfree_skb(skb);
				kfree_skb_list(next);
				break;
			}
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature we
		 * check; we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_CSUM_MASK) &&
		    skb_checksum_help(skb))
			goto drop;
		if (ptr_ring_produce(&q->ring, skb))
			goto drop;
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	if (tap->count_rx_dropped)
		tap->count_rx_dropped(tap);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL_GPL(tap_handle_frame);

static struct major_info *tap_get_major(int major)
{
	struct major_info *tap_major;

	list_for_each_entry_rcu(tap_major, &major_list, next) {
		if (tap_major->major == major)
			return tap_major;
	}

	return NULL;
}

int tap_get_minor(dev_t major, struct tap_dev *tap)
{
	int retval = -ENOMEM;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major) {
		retval = -EINVAL;
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_ATOMIC);
	if (retval >= 0) {
		tap->minor = retval;
	} else if (retval == -ENOSPC) {
		netdev_err(tap->dev, "Too many tap devices\n");
		retval = -EINVAL;
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return retval < 0 ? retval : 0;
}
EXPORT_SYMBOL_GPL(tap_get_minor);

void tap_free_minor(dev_t major, struct tap_dev *tap)
{
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major)
		goto unlock;

	spin_lock(&tap_major->minor_lock);
	if (tap->minor) {
		idr_remove(&tap_major->minor_idr, tap->minor);
		tap->minor = 0;
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tap_free_minor);

static struct tap_dev *dev_get_by_tap_file(int major, int minor)
{
	struct net_device *dev = NULL;
	struct tap_dev *tap;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(major);
	if (!tap_major) {
		tap = NULL;
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	tap = idr_find(&tap_major->minor_idr, minor);
	if (tap) {
		dev = tap->dev;
		dev_hold(dev);
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return tap;
}

static void tap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
}

static void tap_sock_destruct(struct sock *sk)
{
	struct tap_queue *q = container_of(sk, struct tap_queue, sk);

	ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
}

static int tap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tap_dev *tap;
	struct tap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	tap = dev_get_by_tap_file(imajor(inode), iminor(inode));
	if (!tap)
		goto err;

	err = -ENOMEM;
	q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					 &tap_proto, 0);
	if (!q)
		goto err;
	if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
		sk_free(&q->sk);
		goto err;
	}

	init_waitqueue_head(&q->sock.wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &tap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = tap_sock_write_space;
	q->sk.sk_destruct = tap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * so far only KVM virtio_net uses tap, enable zero copy between
	 * guest kernel and host kernel when lower device supports zerocopy
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy so we don't have to look at the lower device directly.
	 */
	if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = tap_set_queue(tap, file, q);
	if (err) {
		/* tap_sock_destruct() will take care of freeing ptr_ring */
		goto err_put;
	}

	dev_put(tap->dev);

	rtnl_unlock();
	return err;

err_put:
	sock_put(&q->sk);
err:
	if (tap)
		dev_put(tap->dev);

	rtnl_unlock();
	return err;
}

static int tap_release(struct inode *inode, struct file *file)
{
	struct tap_queue *q = file->private_data;

	tap_put_queue(q);
	return 0;
}

static __poll_t tap_poll(struct file *file, poll_table *wait)
{
	struct tap_queue *q = file->private_data;
	__poll_t mask = EPOLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->sock.wq.wait, wait);

	if (!ptr_ring_empty(&q->ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= EPOLLOUT | EPOLLWRNORM;

out:
	return mask;
}

static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
					    size_t len, size_t linear,
					    int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
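
/*
 * Illustrative example (not from the original source): called with
 * len == 9000 and linear == 128 (e.g. taken from vnet_hdr.hdr_len),
 * prepad + len exceeds PAGE_SIZE, so the skb gets a 128-byte linear
 * head and the remaining 8872 bytes in page fragments. A 500-byte
 * write falls under PAGE_SIZE (assuming 4 KiB pages) and is made
 * entirely linear by the check above.
 */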

/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define TAP_RESERVE HH_DATA_OFF(ETH_HLEN)
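
/*
 * Illustrative note (assuming the usual HH_DATA_MOD of 16): with
 * ETH_HLEN == 14, HH_DATA_OFF(ETH_HLEN) evaluates to 2, so reserving
 * TAP_RESERVE bytes makes the 14-byte Ethernet header end on a
 * 16-byte boundary and the IP header that follows start aligned.
 */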

/* Get packet from user space buffer */
static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
			    struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(TAP_RESERVE);
	struct sk_buff *skb;
	struct tap_dev *tap;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	int depth;
	bool zerocopy = false;
	size_t linear;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tap16_to_cpu(q, vnet_hdr.csum_start) +
		    tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			     tap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_tap16(q,
				 tap16_to_cpu(q, vnet_hdr.csum_start) +
				 tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ?
			tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		else if (copylen < ETH_HLEN)
			copylen = ETH_HLEN;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
		if (linear > good_linear)
			linear = good_linear;
		else if (linear < ETH_HLEN)
			linear = ETH_HLEN;
	}

	skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
			    linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else
		err = skb_copy_datagram_from_iter(skb, 0, from, len);

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb);

	/* Move network header to the right position for VLAN tagged packets */
	if (eth_type_vlan(skb->protocol) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_zcopy_init(skb, msg_control);
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;

		uarg->callback(NULL, uarg, false);
	}

	if (tap) {
		skb->dev = tap->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap && tap->count_tx_dropped)
		tap->count_tx_dropped(tap);
	rcu_read_unlock();

	return err;
}

static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;

	return tap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}

/* Put packet to the user space buffer */
static ssize_t tap_put_user(struct tap_queue *q,
			    const struct sk_buff *skb,
			    struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
		struct virtio_net_hdr vnet_hdr;

		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q), true,
					    vlan_hlen))
			BUG();

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (skb_vlan_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;

		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}

static ssize_t tap_do_read(struct tap_queue *q,
			   struct iov_iter *to,
			   int noblock, struct sk_buff *skb)
{
	DEFINE_WAIT(wait);
	ssize_t ret = 0;

	if (!iov_iter_count(to)) {
		kfree_skb(skb);
		return 0;
	}

	if (skb)
		goto put;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = ptr_ring_consume(&q->ring);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

put:
	if (skb) {
		ret = tap_put_user(q, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}

static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK, NULL);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
{
	struct tap_dev *tap;

	ASSERT_RTNL();
	tap = rtnl_dereference(q->tap);
	if (tap)
		dev_hold(tap->dev);

	return tap;
}

static void tap_put_tap_dev(struct tap_dev *tap)
{
	dev_put(tap->dev);
}

static int tap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	int ret;

	tap = tap_get_tap_dev(q);
	if (!tap)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = tap_enable_queue(tap, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = tap_disable_queue(q);
	else
		ret = -EINVAL;

	tap_put_tap_dev(tap);
	return ret;
}

static int set_offload(struct tap_queue *q, unsigned long arg)
{
	struct tap_dev *tap;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	tap = rtnl_dereference(q->tap);
	if (!tap)
		return -ENOLINK;

	features = tap->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}
	}

	/* tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that userspace wants to
	 * accept TSO frames and turning it off means that userspace
	 * does not support TSO.
	 * For tap, we have to invert it to mean the same thing.
	 * When userspace turns off TSO, we turn off GSO/LRO so that
	 * userspace will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	tap->tap_features = feature_mask;
	if (tap->update_features)
		tap->update_features(tap, features);

	return 0;
}
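
/*
 * Worked example (illustrative): TUNSETOFFLOAD with
 * TUN_F_CSUM | TUN_F_TSO4 yields a feature_mask of
 * NETIF_F_HW_CSUM | NETIF_F_TSO, so RX_OFFLOADS (GRO/LRO) is enabled
 * on the lower device and userspace signals that it can accept TSO
 * frames; passing 0 clears the mask and turns RX_OFFLOADS off again.
 */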

/*
 * provide compatibility with generic tun/tap interface
 */
static long tap_ioctl(struct file *file, unsigned int cmd,
		      unsigned long arg)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned short u;
	int __user *sp = argp;
	struct sockaddr sa;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = (q->flags & ~TAP_IFFEATURES) | u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		u = q->flags;
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    put_user(u, &ifr->ifr_flags))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = tap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(s, sp))
			return -EFAULT;
		if (s <= 0)
			return -EINVAL;

		q->sk.sk_sndbuf = s;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNGETVNETLE:
		s = !!(q->flags & TAP_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			q->flags |= TAP_VNET_LE;
		else
			q->flags &= ~TAP_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return tap_get_vnet_be(q, sp);

	case TUNSETVNETBE:
		return tap_set_vnet_be(q, sp);

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	case SIOCGIFHWADDR:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = 0;
		dev_get_mac_address(&sa, dev_net(tap->dev), tap->dev->name);
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    copy_to_user(&ifr->ifr_hwaddr, &sa, sizeof(sa)))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
			return -EFAULT;
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = dev_set_mac_address_user(tap->dev, &sa, NULL);
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}

static const struct file_operations tap_fops = {
	.owner		= THIS_MODULE,
	.open		= tap_open,
	.release	= tap_release,
	.read_iter	= tap_read_iter,
	.write_iter	= tap_write_iter,
	.poll		= tap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= tap_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};
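
/*
 * A minimal userspace sketch of driving this character device
 * (illustrative; assumes a macvtap instance already exists as
 * /dev/tap<ifindex>):
 *
 *	int fd = open("/dev/tap5", O_RDWR);
 *	int hdrsz = sizeof(struct virtio_net_hdr);
 *
 *	ioctl(fd, TUNSETVNETHDRSZ, &hdrsz);
 *	ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM | TUN_F_TSO4);
 *	// read()/write() now move frames, each prefixed by a
 *	// struct virtio_net_hdr, since IFF_VNET_HDR is on by default.
 */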

static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
{
	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
	struct virtio_net_hdr *gso = &hdr->gso;
	int buflen = hdr->buflen;
	int vnet_hdr_len = 0;
	struct tap_dev *tap;
	struct sk_buff *skb;
	int err, depth;

	if (q->flags & IFF_VNET_HDR)
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

	skb = build_skb(xdp->data_hard_start, buflen);
	if (!skb) {
		err = -ENOMEM;
		goto err;
	}

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	/* Move network header to the right position for VLAN tagged packets */
	if (eth_type_vlan(skb->protocol) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap) {
		skb->dev = tap->dev;
		skb_probe_transport_header(skb);
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return 0;

err_kfree:
	kfree_skb(skb);
err:
	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap && tap->count_tx_dropped)
		tap->count_tx_dropped(tap);
	rcu_read_unlock();
	return err;
}

static int tap_sendmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	struct tun_msg_ctl *ctl = m->msg_control;
	struct xdp_buff *xdp;
	int i;

	if (ctl && (ctl->type == TUN_MSG_PTR)) {
		for (i = 0; i < ctl->num; i++) {
			xdp = &((struct xdp_buff *)ctl->ptr)[i];
			tap_get_user_xdp(q, xdp);
		}
		return 0;
	}

	return tap_get_user(q, ctl ? ctl->ptr : NULL, &m->msg_iter,
			    m->msg_flags & MSG_DONTWAIT);
}

static int tap_recvmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len, int flags)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	struct sk_buff *skb = m->msg_control;
	int ret;

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
		kfree_skb(skb);
		return -EINVAL;
	}
	ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

static int tap_peek_len(struct socket *sock)
{
	struct tap_queue *q = container_of(sock, struct tap_queue,
					   sock);
	return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tap_socket_ops = {
	.sendmsg = tap_sendmsg,
	.recvmsg = tap_recvmsg,
	.peek_len = tap_peek_len,
};

/* Get an underlying socket object from a tap file. Returns error unless file
 * is attached to a device. The returned object works like a packet socket; it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tap_get_socket(struct file *file)
{
	struct tap_queue *q;

	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(tap_get_socket);
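
/*
 * A minimal sketch of the intended consumer (illustrative; vhost-net
 * is the in-tree user, and its exact invocations may differ):
 *
 *	struct socket *sock = tap_get_socket(file);
 *
 *	if (!IS_ERR(sock))
 *		sock_sendmsg(sock, &msg);	// inject a frame, as if
 *						// written through the fd
 */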

struct ptr_ring *tap_get_ptr_ring(struct file *file)
{
	struct tap_queue *q;

	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->ring;
}
EXPORT_SYMBOL_GPL(tap_get_ptr_ring);

int tap_queue_resize(struct tap_dev *tap)
{
	struct net_device *dev = tap->dev;
	struct tap_queue *q;
	struct ptr_ring **rings;
	int n = tap->numqueues;
	int ret, i = 0;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	list_for_each_entry(q, &tap->queue_list, next)
		rings[i++] = &q->ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       __skb_array_destroy_skb);

	kfree(rings);
	return ret;
}
EXPORT_SYMBOL_GPL(tap_queue_resize);

static int tap_list_add(dev_t major, const char *device_name)
{
	struct major_info *tap_major;

	tap_major = kzalloc(sizeof(*tap_major), GFP_ATOMIC);
	if (!tap_major)
		return -ENOMEM;

	tap_major->major = MAJOR(major);

	idr_init(&tap_major->minor_idr);
	spin_lock_init(&tap_major->minor_lock);

	tap_major->device_name = device_name;

	list_add_tail_rcu(&tap_major->next, &major_list);
	return 0;
}

int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
		    const char *device_name, struct module *module)
{
	int err;

	err = alloc_chrdev_region(tap_major, 0, TAP_NUM_DEVS, device_name);
	if (err)
		goto out1;

	cdev_init(tap_cdev, &tap_fops);
	tap_cdev->owner = module;
	err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
	if (err)
		goto out2;

	err = tap_list_add(*tap_major, device_name);
	if (err)
		goto out3;

	return 0;

out3:
	cdev_del(tap_cdev);
out2:
	unregister_chrdev_region(*tap_major, TAP_NUM_DEVS);
out1:
	return err;
}
EXPORT_SYMBOL_GPL(tap_create_cdev);
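
/*
 * A minimal sketch of how a client driver is expected to register
 * (illustrative; modeled on the in-tree macvtap/ipvtap users, whose
 * exact code may differ):
 *
 *	static struct cdev macvtap_cdev;
 *	static dev_t macvtap_major;
 *
 *	err = tap_create_cdev(&macvtap_cdev, &macvtap_major,
 *			      "macvtap", THIS_MODULE);
 *	...
 *	tap_destroy_cdev(macvtap_major, &macvtap_cdev);	// on module exit
 */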

void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev)
{
	struct major_info *tap_major, *tmp;

	cdev_del(tap_cdev);
	unregister_chrdev_region(major, TAP_NUM_DEVS);
	list_for_each_entry_safe(tap_major, tmp, &major_list, next) {
		if (tap_major->major == MAJOR(major)) {
			idr_destroy(&tap_major->minor_idr);
			list_del_rcu(&tap_major->next);
			kfree_rcu(tap_major, rcu);
		}
	}
}
EXPORT_SYMBOL_GPL(tap_destroy_cdev);

MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
MODULE_LICENSE("GPL");