1 // SPDX-License-Identifier: GPL-2.0-or-later
7 * PACKET - implements raw packet sockets.
22 * Alan Cox : Re-commented the code.
30 * Alan Cox : New buffers. Use sk->mac.raw.
96 - If the device has no dev->header_ops->create, there is no LL header
103 needed_headroom to be (the real WiFi header length - the fake Ethernet
105 - packet socket receives packets with a pulled ll header,
109 -----------
112 mac_header -> ll header
113 data -> data
116 mac_header -> ll header
117 data -> ll header
120 mac_header -> data
123 data -> data
126 mac_header -> data. ll header is invisible to us.
127 data -> data
135 ------------
137 dev->header_ops != NULL
138 mac_header -> ll header
139 data -> ll header
141 dev->header_ops == NULL (ll header is invisible to us)
142 mac_header -> data
143 data -> data
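
Illustrative sketch (not part of af_packet.c): the layouts above are what user
space observes. A SOCK_RAW packet socket delivers data starting at the
link-layer header, while SOCK_DGRAM delivers it with the header already
pulled. The interface name and ETH_P_IP are example choices; CAP_NET_RAW is
required.

#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <arpa/inet.h>
#include <unistd.h>

static int open_pkt(int type)
{
	int fd = socket(AF_PACKET, type, htons(ETH_P_IP));
	struct sockaddr_ll sll = {
		.sll_family   = AF_PACKET,
		.sll_protocol = htons(ETH_P_IP),
		.sll_ifindex  = if_nametoindex("eth0"),	/* assumed name */
	};

	if (fd < 0 || bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0)
		return -1;
	return fd;
}

int main(void)
{
	unsigned char buf[2048];
	int raw = open_pkt(SOCK_RAW);	  /* data starts at the ll header */
	int dgram = open_pkt(SOCK_DGRAM); /* ll header already pulled */
	ssize_t n;

	n = recv(raw, buf, sizeof(buf), 0);
	if (n >= (ssize_t)sizeof(struct ethhdr))
		printf("raw: ethertype 0x%04x\n",
		       ntohs(((struct ethhdr *)buf)->h_proto));
	n = recv(dgram, buf, sizeof(buf), 0);	/* buf[0] is the IP header */
	if (n > 0)
		printf("dgram: first byte 0x%02x (IP version/IHL)\n", buf[0]);
	close(raw);
	close(dgram);
	return 0;
}
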
178 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
179 #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
180 #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
181 #define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
182 #define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
183 #define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
227 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
229 #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
231 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
233 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
235 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
236 ((x)->kactive_blk_num+1) : 0)
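
Illustrative sketch (not part of af_packet.c): the BLOCK_* accessors above
mirror fields that user space reads through the same uapi structs. A minimal
walk of one TPACKET_V3 block, assuming 'block' points into a mapped
PACKET_RX_RING block already handed over (block_status has TP_STATUS_USER
set):

#include <stdint.h>
#include <stdio.h>
#include <linux/if_packet.h>

static void walk_block(struct tpacket_block_desc *block)
{
	uint32_t num = block->hdr.bh1.num_pkts;	/* BLOCK_NUM_PKTS() */
	struct tpacket3_hdr *ppd = (struct tpacket3_hdr *)
		((uint8_t *)block + block->hdr.bh1.offset_to_first_pkt);

	for (uint32_t i = 0; i < num; i++) {
		printf("pkt %u: snaplen %u rxhash 0x%08x\n",
		       i, ppd->tp_snaplen, ppd->hv1.tp_rxhash);
		ppd = (struct tpacket3_hdr *)
			((uint8_t *)ppd + ppd->tp_next_offset);
	}
	/* Hand the whole block back so the kernel can reopen it. */
	block->hdr.bh1.block_status = TP_STATUS_KERNEL;
}
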
251 dev = rcu_dereference(po->cached_dev); in packet_cached_dev_get()
262 rcu_assign_pointer(po->cached_dev, dev); in packet_cached_dev_assign()
267 RCU_INIT_POINTER(po->cached_dev, NULL); in packet_cached_dev_reset()
272 return po->xmit == packet_direct_xmit; in packet_use_direct_xmit()
277 struct net_device *dev = skb->dev; in packet_pick_tx_queue()
278 const struct net_device_ops *ops = dev->netdev_ops; in packet_pick_tx_queue()
283 skb->sender_cpu = cpu + 1; in packet_pick_tx_queue()
285 skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues); in packet_pick_tx_queue()
286 if (ops->ndo_select_queue) { in packet_pick_tx_queue()
287 queue_index = ops->ndo_select_queue(dev, skb, NULL); in packet_pick_tx_queue()
304 if (!po->running) { in __register_prot_hook()
305 if (po->fanout) in __register_prot_hook()
308 dev_add_pack(&po->prot_hook); in __register_prot_hook()
311 po->running = 1; in __register_prot_hook()
317 lockdep_assert_held_once(&pkt_sk(sk)->bind_lock); in register_prot_hook()
322 * the po->bind_lock and do a synchronize_net to make sure no
324 * of po->prot_hook. If the sync parameter is false, it is the
331 lockdep_assert_held_once(&po->bind_lock); in __unregister_prot_hook()
333 po->running = 0; in __unregister_prot_hook()
335 if (po->fanout) in __unregister_prot_hook()
338 __dev_remove_pack(&po->prot_hook); in __unregister_prot_hook()
343 spin_unlock(&po->bind_lock); in __unregister_prot_hook()
345 spin_lock(&po->bind_lock); in __unregister_prot_hook()
353 if (po->running) in unregister_prot_hook()
369 switch (po->tp_version) { in __packet_set_status()
371 h.h1->tp_status = status; in __packet_set_status()
372 flush_dcache_page(pgv_to_page(&h.h1->tp_status)); in __packet_set_status()
375 h.h2->tp_status = status; in __packet_set_status()
376 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); in __packet_set_status()
379 h.h3->tp_status = status; in __packet_set_status()
380 flush_dcache_page(pgv_to_page(&h.h3->tp_status)); in __packet_set_status()
397 switch (po->tp_version) { in __packet_get_status()
399 flush_dcache_page(pgv_to_page(&h.h1->tp_status)); in __packet_get_status()
400 return h.h1->tp_status; in __packet_get_status()
402 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); in __packet_get_status()
403 return h.h2->tp_status; in __packet_get_status()
405 flush_dcache_page(pgv_to_page(&h.h3->tp_status)); in __packet_get_status()
406 return h.h3->tp_status; in __packet_get_status()
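
Illustrative sketch (not part of af_packet.c): __packet_set_status() and
__packet_get_status() above are the kernel half of the ring handshake. The
user-space half for a TPACKET_V2 ring, under the assumption that 'frame'
points into a mapped PACKET_RX_RING slot:

#include <linux/if_packet.h>

static int try_consume(volatile struct tpacket2_hdr *frame)
{
	if (!(frame->tp_status & TP_STATUS_USER))
		return 0;			/* slot still owned by the kernel */
	/* ... read tp_mac/tp_snaplen and copy the payload out ... */
	__sync_synchronize();			/* finish reads before release */
	frame->tp_status = TP_STATUS_KERNEL;	/* hand the slot back */
	return 1;
}
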
421 ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts)) in tpacket_get_timestamp()
424 if (ktime_to_timespec64_cond(skb->tstamp, ts)) in tpacket_get_timestamp()
437 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) in __packet_set_timestamp()
443 * all store the seconds in a 32-bit unsigned integer. in __packet_set_timestamp()
444 * If we create a version 4, that should have a 64-bit timestamp, in __packet_set_timestamp()
445 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit in __packet_set_timestamp()
448 switch (po->tp_version) { in __packet_set_timestamp()
450 h.h1->tp_sec = ts.tv_sec; in __packet_set_timestamp()
451 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; in __packet_set_timestamp()
454 h.h2->tp_sec = ts.tv_sec; in __packet_set_timestamp()
455 h.h2->tp_nsec = ts.tv_nsec; in __packet_set_timestamp()
458 h.h3->tp_sec = ts.tv_sec; in __packet_set_timestamp()
459 h.h3->tp_nsec = ts.tv_nsec; in __packet_set_timestamp()
467 flush_dcache_page(pgv_to_page(&h.h1->tp_sec)); in __packet_set_timestamp()
481 pg_vec_pos = position / rb->frames_per_block; in packet_lookup_frame()
482 frame_offset = position % rb->frames_per_block; in packet_lookup_frame()
484 h.raw = rb->pg_vec[pg_vec_pos].buffer + in packet_lookup_frame()
485 (frame_offset * rb->frame_size); in packet_lookup_frame()
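
Illustrative sketch (not part of af_packet.c) of the indexing arithmetic
above: a linear frame position splits into a block index and an offset inside
that block. With tp_block_size 4096 and tp_frame_size 2048, frames_per_block
is 2, so position 5 lands in block 2 at byte offset 2048.

#include <stddef.h>

static void *lookup_frame(char *const *blocks, unsigned int frames_per_block,
			  unsigned int frame_size, unsigned int position)
{
	unsigned int blk = position / frames_per_block;
	unsigned int off = position % frames_per_block;

	return blocks[blk] + (size_t)off * frame_size;
}
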
497 return packet_lookup_frame(po, rb, rb->head, status); in packet_current_frame()
502 del_timer_sync(&pkc->retire_blk_timer); in prb_del_retire_blk_timer()
510 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); in prb_shutdown_retire_blk_timer()
512 spin_lock_bh(&rb_queue->lock); in prb_shutdown_retire_blk_timer()
513 pkc->delete_blk_timer = 1; in prb_shutdown_retire_blk_timer()
514 spin_unlock_bh(&rb_queue->lock); in prb_shutdown_retire_blk_timer()
523 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); in prb_setup_retire_blk_timer()
524 timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired, in prb_setup_retire_blk_timer()
526 pkc->retire_blk_timer.expires = jiffies; in prb_setup_retire_blk_timer()
533 unsigned int mbits, div; in prb_calc_retire_blk_tmo() local
538 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); in prb_calc_retire_blk_tmo()
555 div = ecmd.base.speed / 1000; in prb_calc_retire_blk_tmo()
558 if (div) in prb_calc_retire_blk_tmo()
559 mbits /= div; in prb_calc_retire_blk_tmo()
561 if (div) in prb_calc_retire_blk_tmo()
569 p1->feature_req_word = req_u->req3.tp_feature_req_word; in prb_init_ft_ops()
582 p1->knxt_seq_num = 1; in init_prb_bdqc()
583 p1->pkbdq = pg_vec; in init_prb_bdqc()
585 p1->pkblk_start = pg_vec[0].buffer; in init_prb_bdqc()
586 p1->kblk_size = req_u->req3.tp_block_size; in init_prb_bdqc()
587 p1->knum_blocks = req_u->req3.tp_block_nr; in init_prb_bdqc()
588 p1->hdrlen = po->tp_hdrlen; in init_prb_bdqc()
589 p1->version = po->tp_version; in init_prb_bdqc()
590 p1->last_kactive_blk_num = 0; in init_prb_bdqc()
591 po->stats.stats3.tp_freeze_q_cnt = 0; in init_prb_bdqc()
592 if (req_u->req3.tp_retire_blk_tov) in init_prb_bdqc()
593 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov; in init_prb_bdqc()
595 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po, in init_prb_bdqc()
596 req_u->req3.tp_block_size); in init_prb_bdqc()
597 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); in init_prb_bdqc()
598 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; in init_prb_bdqc()
599 rwlock_init(&p1->blk_fill_in_prog_lock); in init_prb_bdqc()
601 p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); in init_prb_bdqc()
612 mod_timer(&pkc->retire_blk_timer, in _prb_refresh_rx_retire_blk_timer()
613 jiffies + pkc->tov_in_jiffies); in _prb_refresh_rx_retire_blk_timer()
614 pkc->last_kactive_blk_num = pkc->kactive_blk_num; in _prb_refresh_rx_retire_blk_timer()
621 * on a packet-by-packet basis.
623 * With a 1MB block-size, on a 1Gbps line, it will take
636 * a) line-speed and b) block-size.
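
Illustrative sketch (not part of af_packet.c) of the arithmetic behind the
"~8 msec" figure above: the time to fill one block at line rate is block bits
divided by bits per millisecond. A 1 MiB block is 8,388,608 bits; at 1 Gbps
(10^6 bits per millisecond) that is ~8 ms.

#include <stdio.h>

static unsigned int fill_time_ms(unsigned int blk_bytes,
				 unsigned int speed_mbps)
{
	unsigned long long bits = (unsigned long long)blk_bytes * 8;

	return (unsigned int)(bits / ((unsigned long long)speed_mbps * 1000));
}

int main(void)
{
	printf("%u ms\n", fill_time_ms(1 << 20, 1000));	/* prints 8 */
	return 0;
}
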
644 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring); in prb_retire_rx_blk_timer_expired()
648 spin_lock(&po->sk.sk_receive_queue.lock); in prb_retire_rx_blk_timer_expired()
653 if (unlikely(pkc->delete_blk_timer)) in prb_retire_rx_blk_timer_expired()
667 write_lock(&pkc->blk_fill_in_prog_lock); in prb_retire_rx_blk_timer_expired()
668 write_unlock(&pkc->blk_fill_in_prog_lock); in prb_retire_rx_blk_timer_expired()
671 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { in prb_retire_rx_blk_timer_expired()
683 /* Case 1. Queue was frozen because user-space was in prb_retire_rx_blk_timer_expired()
688 * Ok, user-space is still behind. in prb_retire_rx_blk_timer_expired()
693 /* Case 2. Queue was frozen, user-space caught up, in prb_retire_rx_blk_timer_expired()
698 * Thawing/timer-refresh is a side effect. in prb_retire_rx_blk_timer_expired()
710 spin_unlock(&po->sk.sk_receive_queue.lock); in prb_retire_rx_blk_timer_expired()
726 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end); in prb_flush_block()
763 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; in prb_close_block()
764 struct sock *sk = &po->sk; in prb_close_block()
766 if (atomic_read(&po->tp_drops)) in prb_close_block()
769 last_pkt = (struct tpacket3_hdr *)pkc1->prev; in prb_close_block()
770 last_pkt->tp_next_offset = 0; in prb_close_block()
774 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; in prb_close_block()
775 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; in prb_close_block()
777 /* Ok, we tmo'd - so get the current time. in prb_close_block()
784 h1->ts_last_pkt.ts_sec = ts.tv_sec; in prb_close_block()
785 h1->ts_last_pkt.ts_nsec = ts.tv_nsec; in prb_close_block()
793 sk->sk_data_ready(sk); in prb_close_block()
795 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1); in prb_close_block()
800 pkc->reset_pending_on_curr_blk = 0; in prb_thaw_queue()
814 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; in prb_open_block()
822 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++; in prb_open_block()
824 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); in prb_open_block()
828 h1->ts_first_pkt.ts_sec = ts.tv_sec; in prb_open_block()
829 h1->ts_first_pkt.ts_nsec = ts.tv_nsec; in prb_open_block()
831 pkc1->pkblk_start = (char *)pbd1; in prb_open_block()
832 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); in prb_open_block()
834 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); in prb_open_block()
837 pbd1->version = pkc1->version; in prb_open_block()
838 pkc1->prev = pkc1->nxt_offset; in prb_open_block()
839 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size; in prb_open_block()
852 * 4) user-space is either sleeping or processing block '0'.
854 * it will close block-7, loop around and try to fill block '0'.
855 * call-flow:
859 * |->(BLOCK_STATUS == USER) evaluates to true
860 * 5.1) Since block-0 is currently in-use, we just freeze the queue.
865 * re-open block-0 in near future.
867 * case and __packet_lookup_frame_in_block will check if block-0
868 * is free and can now be re-used.
873 pkc->reset_pending_on_curr_blk = 1; in prb_freeze_queue()
874 po->stats.stats3.tp_freeze_q_cnt++; in prb_freeze_queue()
907 return (void *)pkc->nxt_offset; in prb_dispatch_next_block()
919 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't in prb_retire_current_block()
924 * the timer-handler already handled this case. in prb_retire_current_block()
928 write_lock(&pkc->blk_fill_in_prog_lock); in prb_retire_current_block()
929 write_unlock(&pkc->blk_fill_in_prog_lock); in prb_retire_current_block()
943 return pkc->reset_pending_on_curr_blk; in prb_queue_frozen()
947 __releases(&pkc->blk_fill_in_prog_lock) in prb_clear_blk_fill_status()
951 read_unlock(&pkc->blk_fill_in_prog_lock); in prb_clear_blk_fill_status()
957 ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb); in prb_fill_rxhash()
963 ppd->hv1.tp_rxhash = 0; in prb_clear_rxhash()
969 if (skb_vlan_tag_present(pkc->skb)) { in prb_fill_vlan_info()
970 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb); in prb_fill_vlan_info()
971 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto); in prb_fill_vlan_info()
972 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; in prb_fill_vlan_info()
974 ppd->hv1.tp_vlan_tci = 0; in prb_fill_vlan_info()
975 ppd->hv1.tp_vlan_tpid = 0; in prb_fill_vlan_info()
976 ppd->tp_status = TP_STATUS_AVAILABLE; in prb_fill_vlan_info()
983 ppd->hv1.tp_padding = 0; in prb_run_all_ft_ops()
986 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH) in prb_run_all_ft_ops()
996 __acquires(&pkc->blk_fill_in_prog_lock) in prb_fill_curr_block()
1001 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len); in prb_fill_curr_block()
1002 pkc->prev = curr; in prb_fill_curr_block()
1003 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len); in prb_fill_curr_block()
1006 read_lock(&pkc->blk_fill_in_prog_lock); in prb_fill_curr_block()
1010 /* Assumes caller has the sk->rx_queue.lock */
1020 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); in __packet_lookup_frame_in_block()
1027 * is still in_use by user-space. in __packet_lookup_frame_in_block()
1034 * Ok, the block was released by user-space. in __packet_lookup_frame_in_block()
1044 curr = pkc->nxt_offset; in __packet_lookup_frame_in_block()
1045 pkc->skb = skb; in __packet_lookup_frame_in_block()
1046 end = (char *)pbd + pkc->kblk_size; in __packet_lookup_frame_in_block()
1077 switch (po->tp_version) { in packet_current_rx_frame()
1080 curr = packet_lookup_frame(po, &po->rx_ring, in packet_current_rx_frame()
1081 po->rx_ring.head, status); in packet_current_rx_frame()
1108 if (rb->prb_bdqc.kactive_blk_num) in prb_previous_blk_num()
1109 prev = rb->prb_bdqc.kactive_blk_num-1; in prb_previous_blk_num()
1111 prev = rb->prb_bdqc.knum_blocks-1; in prb_previous_blk_num()
1128 if (po->tp_version <= TPACKET_V2) in packet_previous_rx_frame()
1137 switch (po->tp_version) { in packet_increment_rx_head()
1153 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max; in packet_previous_frame()
1159 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; in packet_increment_head()
1164 this_cpu_inc(*rb->pending_refcnt); in packet_inc_pending()
1169 this_cpu_dec(*rb->pending_refcnt); in packet_dec_pending()
1178 if (rb->pending_refcnt == NULL) in packet_read_pending()
1182 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu); in packet_read_pending()
1189 po->rx_ring.pending_refcnt = NULL; in packet_alloc_pending()
1191 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int); in packet_alloc_pending()
1192 if (unlikely(po->tx_ring.pending_refcnt == NULL)) in packet_alloc_pending()
1193 return -ENOBUFS; in packet_alloc_pending()
1200 free_percpu(po->tx_ring.pending_refcnt); in packet_free_pending()
1212 len = READ_ONCE(po->rx_ring.frame_max) + 1; in __tpacket_has_room()
1213 idx = READ_ONCE(po->rx_ring.head); in __tpacket_has_room()
1217 idx -= len; in __tpacket_has_room()
1218 return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL); in __tpacket_has_room()
1225 len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks); in __tpacket_v3_has_room()
1226 idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num); in __tpacket_v3_has_room()
1230 idx -= len; in __tpacket_v3_has_room()
1231 return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL); in __tpacket_v3_has_room()
1237 const struct sock *sk = &po->sk; in __packet_rcv_has_room()
1240 if (po->prot_hook.func != tpacket_rcv) { in __packet_rcv_has_room()
1241 int rcvbuf = READ_ONCE(sk->sk_rcvbuf); in __packet_rcv_has_room()
1242 int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc) in __packet_rcv_has_room()
1243 - (skb ? skb->truesize : 0); in __packet_rcv_has_room()
1253 if (po->tp_version == TPACKET_V3) { in __packet_rcv_has_room()
1275 if (READ_ONCE(po->pressure) != pressure) in packet_rcv_has_room()
1276 WRITE_ONCE(po->pressure, pressure); in packet_rcv_has_room()
1283 if (READ_ONCE(po->pressure) && in packet_rcv_try_clear_pressure()
1285 WRITE_ONCE(po->pressure, 0); in packet_rcv_try_clear_pressure()
1290 skb_queue_purge(&sk->sk_error_queue); in packet_sock_destruct()
1292 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); in packet_sock_destruct()
1293 WARN_ON(refcount_read(&sk->sk_wmem_alloc)); in packet_sock_destruct()
1305 u32 *history = po->rollover->history; in fanout_flow_is_huge()
1334 unsigned int val = atomic_inc_return(&f->rr_cur); in fanout_demux_lb()
1361 po = pkt_sk(f->arr[idx]); in fanout_demux_rollover()
1371 i = j = min_t(int, po->rollover->sock, num - 1); in fanout_demux_rollover()
1373 po_next = pkt_sk(f->arr[i]); in fanout_demux_rollover()
1374 if (po_next != po_skip && !READ_ONCE(po_next->pressure) && in fanout_demux_rollover()
1377 po->rollover->sock = i; in fanout_demux_rollover()
1378 atomic_long_inc(&po->rollover->num); in fanout_demux_rollover()
1380 atomic_long_inc(&po->rollover->num_huge); in fanout_demux_rollover()
1388 atomic_long_inc(&po->rollover->num_failed); in fanout_demux_rollover()
1407 prog = rcu_dereference(f->bpf_prog); in fanout_demux_bpf()
1417 return f->flags & (flag >> 8); in fanout_has_flag()
1423 struct packet_fanout *f = pt->af_packet_priv; in packet_rcv_fanout()
1424 unsigned int num = READ_ONCE(f->num_members); in packet_rcv_fanout()
1425 struct net *net = read_pnet(&f->net); in packet_rcv_fanout()
1439 switch (f->type) { in packet_rcv_fanout()
1468 po = pkt_sk(f->arr[idx]); in packet_rcv_fanout()
1469 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); in packet_rcv_fanout()
1479 struct packet_fanout *f = po->fanout; in __fanout_link()
1481 spin_lock(&f->lock); in __fanout_link()
1482 f->arr[f->num_members] = sk; in __fanout_link()
1484 f->num_members++; in __fanout_link()
1485 if (f->num_members == 1) in __fanout_link()
1486 dev_add_pack(&f->prot_hook); in __fanout_link()
1487 spin_unlock(&f->lock); in __fanout_link()
1492 struct packet_fanout *f = po->fanout; in __fanout_unlink()
1495 spin_lock(&f->lock); in __fanout_unlink()
1496 for (i = 0; i < f->num_members; i++) { in __fanout_unlink()
1497 if (f->arr[i] == sk) in __fanout_unlink()
1500 BUG_ON(i >= f->num_members); in __fanout_unlink()
1501 f->arr[i] = f->arr[f->num_members - 1]; in __fanout_unlink()
1502 f->num_members--; in __fanout_unlink()
1503 if (f->num_members == 0) in __fanout_unlink()
1504 __dev_remove_pack(&f->prot_hook); in __fanout_unlink()
1505 spin_unlock(&f->lock); in __fanout_unlink()
1510 if (sk->sk_family != PF_PACKET) in match_fanout_group()
1513 return ptype->af_packet_priv == pkt_sk(sk)->fanout; in match_fanout_group()
1518 switch (f->type) { in fanout_init_data()
1520 atomic_set(&f->rr_cur, 0); in fanout_init_data()
1524 RCU_INIT_POINTER(f->bpf_prog, NULL); in fanout_init_data()
1533 spin_lock(&f->lock); in __fanout_set_data_bpf()
1534 old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock)); in __fanout_set_data_bpf()
1535 rcu_assign_pointer(f->bpf_prog, new); in __fanout_set_data_bpf()
1536 spin_unlock(&f->lock); in __fanout_set_data_bpf()
1551 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) in fanout_set_data_cbpf()
1552 return -EPERM; in fanout_set_data_cbpf()
1562 __fanout_set_data_bpf(po->fanout, new); in fanout_set_data_cbpf()
1572 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) in fanout_set_data_ebpf()
1573 return -EPERM; in fanout_set_data_ebpf()
1575 return -EINVAL; in fanout_set_data_ebpf()
1577 return -EFAULT; in fanout_set_data_ebpf()
1583 __fanout_set_data_bpf(po->fanout, new); in fanout_set_data_ebpf()
1590 switch (po->fanout->type) { in fanout_set_data()
1596 return -EINVAL; in fanout_set_data()
1602 switch (f->type) { in fanout_release_data()
1614 if (f->id == candidate_id && in __fanout_id_is_free()
1615 read_pnet(&f->net) == sock_net(sk)) { in __fanout_id_is_free()
1651 return -EINVAL; in fanout_add()
1661 return -EINVAL; in fanout_add()
1666 err = -EALREADY; in fanout_add()
1667 if (po->fanout) in fanout_add()
1672 err = -ENOMEM; in fanout_add()
1676 atomic_long_set(&rollover->num, 0); in fanout_add()
1677 atomic_long_set(&rollover->num_huge, 0); in fanout_add()
1678 atomic_long_set(&rollover->num_failed, 0); in fanout_add()
1683 err = -EINVAL; in fanout_add()
1687 err = -ENOMEM; in fanout_add()
1696 if (f->id == id && in fanout_add()
1697 read_pnet(&f->net) == sock_net(sk)) { in fanout_add()
1702 err = -EINVAL; in fanout_add()
1703 if (match && match->flags != flags) in fanout_add()
1706 err = -ENOMEM; in fanout_add()
1710 write_pnet(&match->net, sock_net(sk)); in fanout_add()
1711 match->id = id; in fanout_add()
1712 match->type = type; in fanout_add()
1713 match->flags = flags; in fanout_add()
1714 INIT_LIST_HEAD(&match->list); in fanout_add()
1715 spin_lock_init(&match->lock); in fanout_add()
1716 refcount_set(&match->sk_ref, 0); in fanout_add()
1718 match->prot_hook.type = po->prot_hook.type; in fanout_add()
1719 match->prot_hook.dev = po->prot_hook.dev; in fanout_add()
1720 match->prot_hook.func = packet_rcv_fanout; in fanout_add()
1721 match->prot_hook.af_packet_priv = match; in fanout_add()
1722 match->prot_hook.id_match = match_fanout_group; in fanout_add()
1723 list_add(&match->list, &fanout_list); in fanout_add()
1725 err = -EINVAL; in fanout_add()
1727 spin_lock(&po->bind_lock); in fanout_add()
1728 if (po->running && in fanout_add()
1729 match->type == type && in fanout_add()
1730 match->prot_hook.type == po->prot_hook.type && in fanout_add()
1731 match->prot_hook.dev == po->prot_hook.dev) { in fanout_add()
1732 err = -ENOSPC; in fanout_add()
1733 if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) { in fanout_add()
1734 __dev_remove_pack(&po->prot_hook); in fanout_add()
1735 po->fanout = match; in fanout_add()
1736 po->rollover = rollover; in fanout_add()
1738 refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); in fanout_add()
1743 spin_unlock(&po->bind_lock); in fanout_add()
1745 if (err && !refcount_read(&match->sk_ref)) { in fanout_add()
1746 list_del(&match->list); in fanout_add()
1756 /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1757 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1767 f = po->fanout; in fanout_release()
1769 po->fanout = NULL; in fanout_release()
1771 if (refcount_dec_and_test(&f->sk_ref)) in fanout_release()
1772 list_del(&f->list); in fanout_release()
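
Illustrative sketch (not part of af_packet.c): fanout_add() above is reached
through the PACKET_FANOUT socket option. The low 16 bits of the argument
carry the group id, bits 16-23 the type, bits 24-31 the flags (see the
PACKET_FANOUT case in packet_getsockopt() below). Group id 42 and hash
demuxing are example choices:

#include <sys/socket.h>
#include <linux/if_packet.h>

static int join_fanout(int fd)
{
	int arg = 42 | (PACKET_FANOUT_HASH << 16);

	return setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
}
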
1784 /* Earlier code assumed this would be a VLAN pkt, double-check in packet_extra_vlan_len_allowed()
1788 if (unlikely(dev->type != ARPHRD_ETHER)) in packet_extra_vlan_len_allowed()
1792 return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)); in packet_extra_vlan_len_allowed()
1810 sk = pt->af_packet_priv; in packet_rcv_spkt()
1819 * For outgoing ones skb->data == skb_mac_header(skb) in packet_rcv_spkt()
1823 if (skb->pkt_type == PACKET_LOOPBACK) in packet_rcv_spkt()
1839 spkt = &PACKET_SKB_CB(skb)->sa.pkt; in packet_rcv_spkt()
1841 skb_push(skb, skb->data - skb_mac_header(skb)); in packet_rcv_spkt()
1847 spkt->spkt_family = dev->type; in packet_rcv_spkt()
1848 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); in packet_rcv_spkt()
1849 spkt->spkt_protocol = skb->protocol; in packet_rcv_spkt()
1867 if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) && in packet_parse_headers()
1868 sock->type == SOCK_RAW) { in packet_parse_headers()
1870 skb->protocol = dev_parse_header_protocol(skb); in packet_parse_headers()
1884 struct sock *sk = sock->sk; in packet_sendmsg_spkt()
1885 DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); in packet_sendmsg_spkt()
1898 if (msg->msg_namelen < sizeof(struct sockaddr)) in packet_sendmsg_spkt()
1899 return -EINVAL; in packet_sendmsg_spkt()
1900 if (msg->msg_namelen == sizeof(struct sockaddr_pkt)) in packet_sendmsg_spkt()
1901 proto = saddr->spkt_protocol; in packet_sendmsg_spkt()
1903 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */ in packet_sendmsg_spkt()
1909 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0; in packet_sendmsg_spkt()
1912 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); in packet_sendmsg_spkt()
1913 err = -ENODEV; in packet_sendmsg_spkt()
1917 err = -ENETDOWN; in packet_sendmsg_spkt()
1918 if (!(dev->flags & IFF_UP)) in packet_sendmsg_spkt()
1928 err = -EPROTONOSUPPORT; in packet_sendmsg_spkt()
1934 err = -EMSGSIZE; in packet_sendmsg_spkt()
1935 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len) in packet_sendmsg_spkt()
1940 int tlen = dev->needed_tailroom; in packet_sendmsg_spkt()
1941 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0; in packet_sendmsg_spkt()
1946 return -ENOBUFS; in packet_sendmsg_spkt()
1956 skb->data -= hhlen; in packet_sendmsg_spkt()
1957 skb->tail -= hhlen; in packet_sendmsg_spkt()
1967 if (!dev_validate_header(dev, skb->data, len)) { in packet_sendmsg_spkt()
1968 err = -EINVAL; in packet_sendmsg_spkt()
1971 if (len > (dev->mtu + dev->hard_header_len + extra_len) && in packet_sendmsg_spkt()
1973 err = -EMSGSIZE; in packet_sendmsg_spkt()
1978 if (msg->msg_controllen) { in packet_sendmsg_spkt()
1984 skb->protocol = proto; in packet_sendmsg_spkt()
1985 skb->dev = dev; in packet_sendmsg_spkt()
1986 skb->priority = sk->sk_priority; in packet_sendmsg_spkt()
1987 skb->mark = sk->sk_mark; in packet_sendmsg_spkt()
1988 skb->tstamp = sockc.transmit_time; in packet_sendmsg_spkt()
1993 skb->no_fcs = 1; in packet_sendmsg_spkt()
2015 filter = rcu_dereference(sk->sk_filter); in run_filter()
2017 res = bpf_prog_run_clear_cb(filter->prog, skb); in run_filter()
2029 return -EINVAL; in packet_rcv_vnet()
2030 *len -= sizeof(vnet_hdr); in packet_rcv_vnet()
2033 return -EINVAL; in packet_rcv_vnet()
2042 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
2043 * and skb->cb are mangled. It works because (and until) packets
2056 u8 *skb_head = skb->data; in packet_rcv()
2057 int skb_len = skb->len; in packet_rcv()
2061 if (skb->pkt_type == PACKET_LOOPBACK) in packet_rcv()
2064 sk = pt->af_packet_priv; in packet_rcv()
2070 skb->dev = dev; in packet_rcv()
2080 if (sk->sk_type != SOCK_DGRAM) in packet_rcv()
2081 skb_push(skb, skb->data - skb_mac_header(skb)); in packet_rcv()
2082 else if (skb->pkt_type == PACKET_OUTGOING) { in packet_rcv()
2088 snaplen = skb->len; in packet_rcv()
2096 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in packet_rcv()
2104 if (skb_head != skb->data) { in packet_rcv()
2105 skb->data = skb_head; in packet_rcv()
2106 skb->len = skb_len; in packet_rcv()
2112 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8); in packet_rcv()
2114 sll = &PACKET_SKB_CB(skb)->sa.ll; in packet_rcv()
2115 sll->sll_hatype = dev->type; in packet_rcv()
2116 sll->sll_pkttype = skb->pkt_type; in packet_rcv()
2117 if (unlikely(po->origdev)) in packet_rcv()
2118 sll->sll_ifindex = orig_dev->ifindex; in packet_rcv()
2120 sll->sll_ifindex = dev->ifindex; in packet_rcv()
2122 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); in packet_rcv()
2124 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg(). in packet_rcv()
2127 PACKET_SKB_CB(skb)->sa.origlen = skb->len; in packet_rcv()
2133 skb->dev = NULL; in packet_rcv()
2139 spin_lock(&sk->sk_receive_queue.lock); in packet_rcv()
2140 po->stats.stats1.tp_packets++; in packet_rcv()
2142 __skb_queue_tail(&sk->sk_receive_queue, skb); in packet_rcv()
2143 spin_unlock(&sk->sk_receive_queue.lock); in packet_rcv()
2144 sk->sk_data_ready(sk); in packet_rcv()
2149 atomic_inc(&po->tp_drops); in packet_rcv()
2150 atomic_inc(&sk->sk_drops); in packet_rcv()
2153 if (skb_head != skb->data && skb_shared(skb)) { in packet_rcv()
2154 skb->data = skb_head; in packet_rcv()
2155 skb->len = skb_len; in packet_rcv()
2172 u8 *skb_head = skb->data; in tpacket_rcv()
2173 int skb_len = skb->len; in tpacket_rcv()
2192 if (skb->pkt_type == PACKET_LOOPBACK) in tpacket_rcv()
2195 sk = pt->af_packet_priv; in tpacket_rcv()
2202 if (sk->sk_type != SOCK_DGRAM) in tpacket_rcv()
2203 skb_push(skb, skb->data - skb_mac_header(skb)); in tpacket_rcv()
2204 else if (skb->pkt_type == PACKET_OUTGOING) { in tpacket_rcv()
2210 snaplen = skb->len; in tpacket_rcv()
2218 atomic_inc(&po->tp_drops); in tpacket_rcv()
2222 if (skb->ip_summed == CHECKSUM_PARTIAL) in tpacket_rcv()
2224 else if (skb->pkt_type != PACKET_OUTGOING && in tpacket_rcv()
2225 (skb->ip_summed == CHECKSUM_COMPLETE || in tpacket_rcv()
2232 if (sk->sk_type == SOCK_DGRAM) { in tpacket_rcv()
2233 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + in tpacket_rcv()
2234 po->tp_reserve; in tpacket_rcv()
2237 netoff = TPACKET_ALIGN(po->tp_hdrlen + in tpacket_rcv()
2239 po->tp_reserve; in tpacket_rcv()
2240 if (po->has_vnet_hdr) { in tpacket_rcv()
2244 macoff = netoff - maclen; in tpacket_rcv()
2247 atomic_inc(&po->tp_drops); in tpacket_rcv()
2250 if (po->tp_version <= TPACKET_V2) { in tpacket_rcv()
2251 if (macoff + snaplen > po->rx_ring.frame_size) { in tpacket_rcv()
2252 if (po->copy_thresh && in tpacket_rcv()
2253 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { in tpacket_rcv()
2258 skb_head = skb->data; in tpacket_rcv()
2263 snaplen = po->rx_ring.frame_size - macoff; in tpacket_rcv()
2270 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { in tpacket_rcv()
2273 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; in tpacket_rcv()
2279 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; in tpacket_rcv()
2283 spin_lock(&sk->sk_receive_queue.lock); in tpacket_rcv()
2289 if (po->tp_version <= TPACKET_V2) { in tpacket_rcv()
2290 slot_id = po->rx_ring.head; in tpacket_rcv()
2291 if (test_bit(slot_id, po->rx_ring.rx_owner_map)) in tpacket_rcv()
2293 __set_bit(slot_id, po->rx_ring.rx_owner_map); in tpacket_rcv()
2297 virtio_net_hdr_from_skb(skb, h.raw + macoff - in tpacket_rcv()
2300 if (po->tp_version == TPACKET_V3) in tpacket_rcv()
2301 prb_clear_blk_fill_status(&po->rx_ring); in tpacket_rcv()
2305 if (po->tp_version <= TPACKET_V2) { in tpacket_rcv()
2306 packet_increment_rx_head(po, &po->rx_ring); in tpacket_rcv()
2309 * because it's COR - Clear On Read. in tpacket_rcv()
2313 if (atomic_read(&po->tp_drops)) in tpacket_rcv()
2317 po->stats.stats1.tp_packets++; in tpacket_rcv()
2320 __skb_queue_tail(&sk->sk_receive_queue, copy_skb); in tpacket_rcv()
2322 spin_unlock(&sk->sk_receive_queue.lock); in tpacket_rcv()
2326 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) in tpacket_rcv()
2331 switch (po->tp_version) { in tpacket_rcv()
2333 h.h1->tp_len = skb->len; in tpacket_rcv()
2334 h.h1->tp_snaplen = snaplen; in tpacket_rcv()
2335 h.h1->tp_mac = macoff; in tpacket_rcv()
2336 h.h1->tp_net = netoff; in tpacket_rcv()
2337 h.h1->tp_sec = ts.tv_sec; in tpacket_rcv()
2338 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; in tpacket_rcv()
2342 h.h2->tp_len = skb->len; in tpacket_rcv()
2343 h.h2->tp_snaplen = snaplen; in tpacket_rcv()
2344 h.h2->tp_mac = macoff; in tpacket_rcv()
2345 h.h2->tp_net = netoff; in tpacket_rcv()
2346 h.h2->tp_sec = ts.tv_sec; in tpacket_rcv()
2347 h.h2->tp_nsec = ts.tv_nsec; in tpacket_rcv()
2349 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); in tpacket_rcv()
2350 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); in tpacket_rcv()
2353 h.h2->tp_vlan_tci = 0; in tpacket_rcv()
2354 h.h2->tp_vlan_tpid = 0; in tpacket_rcv()
2356 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); in tpacket_rcv()
2363 h.h3->tp_status |= status; in tpacket_rcv()
2364 h.h3->tp_len = skb->len; in tpacket_rcv()
2365 h.h3->tp_snaplen = snaplen; in tpacket_rcv()
2366 h.h3->tp_mac = macoff; in tpacket_rcv()
2367 h.h3->tp_net = netoff; in tpacket_rcv()
2368 h.h3->tp_sec = ts.tv_sec; in tpacket_rcv()
2369 h.h3->tp_nsec = ts.tv_nsec; in tpacket_rcv()
2370 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); in tpacket_rcv()
2378 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); in tpacket_rcv()
2379 sll->sll_family = AF_PACKET; in tpacket_rcv()
2380 sll->sll_hatype = dev->type; in tpacket_rcv()
2381 sll->sll_protocol = skb->protocol; in tpacket_rcv()
2382 sll->sll_pkttype = skb->pkt_type; in tpacket_rcv()
2383 if (unlikely(po->origdev)) in tpacket_rcv()
2384 sll->sll_ifindex = orig_dev->ifindex; in tpacket_rcv()
2386 sll->sll_ifindex = dev->ifindex; in tpacket_rcv()
2391 if (po->tp_version <= TPACKET_V2) { in tpacket_rcv()
2403 if (po->tp_version <= TPACKET_V2) { in tpacket_rcv()
2404 spin_lock(&sk->sk_receive_queue.lock); in tpacket_rcv()
2406 __clear_bit(slot_id, po->rx_ring.rx_owner_map); in tpacket_rcv()
2407 spin_unlock(&sk->sk_receive_queue.lock); in tpacket_rcv()
2408 sk->sk_data_ready(sk); in tpacket_rcv()
2409 } else if (po->tp_version == TPACKET_V3) { in tpacket_rcv()
2410 prb_clear_blk_fill_status(&po->rx_ring); in tpacket_rcv()
2414 if (skb_head != skb->data && skb_shared(skb)) { in tpacket_rcv()
2415 skb->data = skb_head; in tpacket_rcv()
2416 skb->len = skb_len; in tpacket_rcv()
2426 spin_unlock(&sk->sk_receive_queue.lock); in tpacket_rcv()
2427 atomic_inc(&po->tp_drops); in tpacket_rcv()
2430 sk->sk_data_ready(sk); in tpacket_rcv()
2437 struct packet_sock *po = pkt_sk(skb->sk); in tpacket_destruct_skb()
2439 if (likely(po->tx_ring.pg_vec)) { in tpacket_destruct_skb()
2444 packet_dec_pending(&po->tx_ring); in tpacket_destruct_skb()
2449 if (!packet_read_pending(&po->tx_ring)) in tpacket_destruct_skb()
2450 complete(&po->skb_completion); in tpacket_destruct_skb()
2458 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && in __packet_snd_vnet_parse()
2459 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + in __packet_snd_vnet_parse()
2460 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 > in __packet_snd_vnet_parse()
2461 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len))) in __packet_snd_vnet_parse()
2462 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(), in __packet_snd_vnet_parse()
2463 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + in __packet_snd_vnet_parse()
2464 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2); in __packet_snd_vnet_parse()
2466 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len) in __packet_snd_vnet_parse()
2467 return -EINVAL; in __packet_snd_vnet_parse()
2476 return -EINVAL; in packet_snd_vnet_parse()
2477 *len -= sizeof(*vnet_hdr); in packet_snd_vnet_parse()
2479 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter)) in packet_snd_vnet_parse()
2480 return -EFAULT; in packet_snd_vnet_parse()
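
Illustrative sketch (not part of af_packet.c): packet_snd_vnet_parse() above
consumes a struct virtio_net_hdr that user space prepends once the
PACKET_VNET_HDR option is enabled. An all-zero header requests no
checksum/GSO offload; the socket is assumed to be already bound, so writev()
needs no address:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_packet.h>
#include <linux/virtio_net.h>

static ssize_t send_with_vnet_hdr(int fd, const void *frame, size_t len)
{
	int one = 1;
	struct virtio_net_hdr vh;
	struct iovec iov[2];

	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &one, sizeof(one));
	memset(&vh, 0, sizeof(vh));		/* no offloads requested */
	iov[0].iov_base = &vh;
	iov[0].iov_len  = sizeof(vh);
	iov[1].iov_base = (void *)frame;
	iov[1].iov_len  = len;
	return writev(fd, iov, 2);
}
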
2492 struct socket *sock = po->sk.sk_socket; in tpacket_fill_skb()
2498 skb->protocol = proto; in tpacket_fill_skb()
2499 skb->dev = dev; in tpacket_fill_skb()
2500 skb->priority = po->sk.sk_priority; in tpacket_fill_skb()
2501 skb->mark = po->sk.sk_mark; in tpacket_fill_skb()
2502 skb->tstamp = sockc->transmit_time; in tpacket_fill_skb()
2503 skb_setup_tx_timestamp(skb, sockc->tsflags); in tpacket_fill_skb()
2511 if (sock->type == SOCK_DGRAM) { in tpacket_fill_skb()
2515 return -EINVAL; in tpacket_fill_skb()
2519 skb_push(skb, dev->hard_header_len); in tpacket_fill_skb()
2520 skb_put(skb, copylen - dev->hard_header_len); in tpacket_fill_skb()
2524 if (!dev_validate_header(dev, skb->data, hdrlen)) in tpacket_fill_skb()
2525 return -EINVAL; in tpacket_fill_skb()
2528 to_write -= hdrlen; in tpacket_fill_skb()
2532 len_max = PAGE_SIZE - offset; in tpacket_fill_skb()
2535 skb->data_len = to_write; in tpacket_fill_skb()
2536 skb->len += to_write; in tpacket_fill_skb()
2537 skb->truesize += to_write; in tpacket_fill_skb()
2538 refcount_add(to_write, &po->sk.sk_wmem_alloc); in tpacket_fill_skb()
2541 nr_frags = skb_shinfo(skb)->nr_frags; in tpacket_fill_skb()
2546 return -EFAULT; in tpacket_fill_skb()
2554 to_write -= len; in tpacket_fill_skb()
2573 switch (po->tp_version) { in tpacket_parse_header()
2575 if (ph.h3->tp_next_offset != 0) { in tpacket_parse_header()
2577 return -EINVAL; in tpacket_parse_header()
2579 tp_len = ph.h3->tp_len; in tpacket_parse_header()
2582 tp_len = ph.h2->tp_len; in tpacket_parse_header()
2585 tp_len = ph.h1->tp_len; in tpacket_parse_header()
2590 return -EMSGSIZE; in tpacket_parse_header()
2593 if (unlikely(po->tp_tx_has_off)) { in tpacket_parse_header()
2596 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); in tpacket_parse_header()
2597 off_max = po->tx_ring.frame_size - tp_len; in tpacket_parse_header()
2598 if (po->sk.sk_type == SOCK_DGRAM) { in tpacket_parse_header()
2599 switch (po->tp_version) { in tpacket_parse_header()
2601 off = ph.h3->tp_net; in tpacket_parse_header()
2604 off = ph.h2->tp_net; in tpacket_parse_header()
2607 off = ph.h1->tp_net; in tpacket_parse_header()
2611 switch (po->tp_version) { in tpacket_parse_header()
2613 off = ph.h3->tp_mac; in tpacket_parse_header()
2616 off = ph.h2->tp_mac; in tpacket_parse_header()
2619 off = ph.h1->tp_mac; in tpacket_parse_header()
2624 return -EINVAL; in tpacket_parse_header()
2626 off = po->tp_hdrlen - sizeof(struct sockaddr_ll); in tpacket_parse_header()
2642 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); in tpacket_snd()
2643 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); in tpacket_snd()
2652 mutex_lock(&po->pg_vec_lock); in tpacket_snd()
2657 if (unlikely(!po->tx_ring.pg_vec)) { in tpacket_snd()
2658 err = -EBUSY; in tpacket_snd()
2663 proto = po->num; in tpacket_snd()
2665 err = -EINVAL; in tpacket_snd()
2666 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) in tpacket_snd()
2668 if (msg->msg_namelen < (saddr->sll_halen in tpacket_snd()
2672 proto = saddr->sll_protocol; in tpacket_snd()
2673 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); in tpacket_snd()
2674 if (po->sk.sk_socket->type == SOCK_DGRAM) { in tpacket_snd()
2675 if (dev && msg->msg_namelen < dev->addr_len + in tpacket_snd()
2678 addr = saddr->sll_addr; in tpacket_snd()
2682 err = -ENXIO; in tpacket_snd()
2685 err = -ENETDOWN; in tpacket_snd()
2686 if (unlikely(!(dev->flags & IFF_UP))) in tpacket_snd()
2689 sockcm_init(&sockc, &po->sk); in tpacket_snd()
2690 if (msg->msg_controllen) { in tpacket_snd()
2691 err = sock_cmsg_send(&po->sk, msg, &sockc); in tpacket_snd()
2696 if (po->sk.sk_socket->type == SOCK_RAW) in tpacket_snd()
2697 reserve = dev->hard_header_len; in tpacket_snd()
2698 size_max = po->tx_ring.frame_size in tpacket_snd()
2699 - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); in tpacket_snd()
2701 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr) in tpacket_snd()
2702 size_max = dev->mtu + reserve + VLAN_HLEN; in tpacket_snd()
2704 reinit_completion(&po->skb_completion); in tpacket_snd()
2707 ph = packet_current_frame(po, &po->tx_ring, in tpacket_snd()
2711 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT); in tpacket_snd()
2712 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo); in tpacket_snd()
2714 err = !timeo ? -ETIMEDOUT : -ERESTARTSYS; in tpacket_snd()
2729 tlen = dev->needed_tailroom; in tpacket_snd()
2730 if (po->has_vnet_hdr) { in tpacket_snd()
2733 tp_len -= sizeof(*vnet_hdr); in tpacket_snd()
2736 tp_len = -EINVAL; in tpacket_snd()
2740 vnet_hdr->hdr_len); in tpacket_snd()
2742 copylen = max_t(int, copylen, dev->hard_header_len); in tpacket_snd()
2743 skb = sock_alloc_send_skb(&po->sk, in tpacket_snd()
2745 (copylen - dev->hard_header_len), in tpacket_snd()
2757 tp_len > dev->mtu + reserve && in tpacket_snd()
2758 !po->has_vnet_hdr && in tpacket_snd()
2760 tp_len = -EMSGSIZE; in tpacket_snd()
2764 if (po->tp_loss) { in tpacket_snd()
2767 packet_increment_head(&po->tx_ring); in tpacket_snd()
2777 if (po->has_vnet_hdr) { in tpacket_snd()
2779 tp_len = -EINVAL; in tpacket_snd()
2785 skb->destructor = tpacket_destruct_skb; in tpacket_snd()
2787 packet_inc_pending(&po->tx_ring); in tpacket_snd()
2790 err = po->xmit(skb); in tpacket_snd()
2805 packet_increment_head(&po->tx_ring); in tpacket_snd()
2809 * to call it as it's a per_cpu variable, but in fast-path in tpacket_snd()
2810 * we already short-circuit the loop with the first in tpacket_snd()
2814 (need_wait && packet_read_pending(&po->tx_ring)))); in tpacket_snd()
2825 mutex_unlock(&po->pg_vec_lock); in tpacket_snd()
2840 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, in packet_alloc_skb()
2847 skb->data_len = len - linear; in packet_alloc_skb()
2848 skb->len += len - linear; in packet_alloc_skb()
2855 struct sock *sk = sock->sk; in packet_snd()
2856 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); in packet_snd()
2876 proto = po->num; in packet_snd()
2878 err = -EINVAL; in packet_snd()
2879 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) in packet_snd()
2881 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) in packet_snd()
2883 proto = saddr->sll_protocol; in packet_snd()
2884 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); in packet_snd()
2885 if (sock->type == SOCK_DGRAM) { in packet_snd()
2886 if (dev && msg->msg_namelen < dev->addr_len + in packet_snd()
2889 addr = saddr->sll_addr; in packet_snd()
2893 err = -ENXIO; in packet_snd()
2896 err = -ENETDOWN; in packet_snd()
2897 if (unlikely(!(dev->flags & IFF_UP))) in packet_snd()
2901 sockc.mark = sk->sk_mark; in packet_snd()
2902 if (msg->msg_controllen) { in packet_snd()
2908 if (sock->type == SOCK_RAW) in packet_snd()
2909 reserve = dev->hard_header_len; in packet_snd()
2910 if (po->has_vnet_hdr) { in packet_snd()
2919 err = -EPROTONOSUPPORT; in packet_snd()
2925 err = -EMSGSIZE; in packet_snd()
2927 (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) in packet_snd()
2930 err = -ENOBUFS; in packet_snd()
2932 tlen = dev->needed_tailroom; in packet_snd()
2934 linear = max(linear, min_t(int, len, dev->hard_header_len)); in packet_snd()
2936 msg->msg_flags & MSG_DONTWAIT, &err); in packet_snd()
2942 err = -EINVAL; in packet_snd()
2943 if (sock->type == SOCK_DGRAM) { in packet_snd()
2948 skb_reserve(skb, -reserve); in packet_snd()
2950 dev->min_header_len != dev->hard_header_len) in packet_snd()
2954 /* Returns -EFAULT on error */ in packet_snd()
2955 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); in packet_snd()
2959 if (sock->type == SOCK_RAW && in packet_snd()
2960 !dev_validate_header(dev, skb->data, len)) { in packet_snd()
2961 err = -EINVAL; in packet_snd()
2967 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && in packet_snd()
2969 err = -EMSGSIZE; in packet_snd()
2973 skb->protocol = proto; in packet_snd()
2974 skb->dev = dev; in packet_snd()
2975 skb->priority = sk->sk_priority; in packet_snd()
2976 skb->mark = sockc.mark; in packet_snd()
2977 skb->tstamp = sockc.transmit_time; in packet_snd()
2990 skb->no_fcs = 1; in packet_snd()
2992 err = po->xmit(skb); in packet_snd()
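
Illustrative sketch (not part of af_packet.c) of the user-space path into
packet_snd(): build a minimal Ethernet frame and sendto() it with a
sockaddr_ll destination. The interface name and the local experimental
ethertype 0x88b5 are example choices:

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <arpa/inet.h>

static ssize_t send_raw(int fd, const unsigned char dst[ETH_ALEN],
			const unsigned char src[ETH_ALEN],
			const void *payload, size_t plen)
{
	unsigned char frame[ETH_FRAME_LEN];
	struct ethhdr *eh = (struct ethhdr *)frame;
	struct sockaddr_ll sll = {
		.sll_family  = AF_PACKET,
		.sll_ifindex = if_nametoindex("eth0"),	/* assumed name */
		.sll_halen   = ETH_ALEN,
	};

	if (plen > ETH_FRAME_LEN - ETH_HLEN)
		return -1;
	memcpy(eh->h_dest, dst, ETH_ALEN);
	memcpy(eh->h_source, src, ETH_ALEN);
	eh->h_proto = htons(0x88b5);		/* experimental ethertype */
	memcpy(frame + ETH_HLEN, payload, plen);
	memcpy(sll.sll_addr, dst, ETH_ALEN);
	return sendto(fd, frame, ETH_HLEN + plen, 0,
		      (struct sockaddr *)&sll, sizeof(sll));
}
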
3011 struct sock *sk = sock->sk; in packet_sendmsg()
3014 if (po->tx_ring.pg_vec) in packet_sendmsg()
3027 struct sock *sk = sock->sk; in packet_release()
3039 mutex_lock(&net->packet.sklist_lock); in packet_release()
3041 mutex_unlock(&net->packet.sklist_lock); in packet_release()
3044 sock_prot_inuse_add(net, sk->sk_prot, -1); in packet_release()
3047 spin_lock(&po->bind_lock); in packet_release()
3051 if (po->prot_hook.dev) { in packet_release()
3052 dev_put(po->prot_hook.dev); in packet_release()
3053 po->prot_hook.dev = NULL; in packet_release()
3055 spin_unlock(&po->bind_lock); in packet_release()
3060 if (po->rx_ring.pg_vec) { in packet_release()
3065 if (po->tx_ring.pg_vec) { in packet_release()
3075 kfree(po->rollover); in packet_release()
3084 sock->sk = NULL; in packet_release()
3088 skb_queue_purge(&sk->sk_receive_queue); in packet_release()
3112 spin_lock(&po->bind_lock); in packet_do_bind()
3115 if (po->fanout) { in packet_do_bind()
3116 ret = -EINVAL; in packet_do_bind()
3123 ret = -ENODEV; in packet_do_bind()
3129 ret = -ENODEV; in packet_do_bind()
3137 proto_curr = po->prot_hook.type; in packet_do_bind()
3138 dev_curr = po->prot_hook.dev; in packet_do_bind()
3143 if (po->running) { in packet_do_bind()
3148 po->num = 0; in packet_do_bind()
3151 dev_curr = po->prot_hook.dev; in packet_do_bind()
3154 dev->ifindex); in packet_do_bind()
3157 BUG_ON(po->running); in packet_do_bind()
3158 po->num = proto; in packet_do_bind()
3159 po->prot_hook.type = proto; in packet_do_bind()
3163 po->prot_hook.dev = NULL; in packet_do_bind()
3164 po->ifindex = -1; in packet_do_bind()
3167 po->prot_hook.dev = dev; in packet_do_bind()
3168 po->ifindex = dev ? dev->ifindex : 0; in packet_do_bind()
3178 if (!unlisted && (!dev || (dev->flags & IFF_UP))) { in packet_do_bind()
3181 sk->sk_err = ENETDOWN; in packet_do_bind()
3183 sk->sk_error_report(sk); in packet_do_bind()
3188 spin_unlock(&po->bind_lock); in packet_do_bind()
3200 struct sock *sk = sock->sk; in packet_bind_spkt()
3201 char name[sizeof(uaddr->sa_data) + 1]; in packet_bind_spkt()
3208 return -EINVAL; in packet_bind_spkt()
3209 /* uaddr->sa_data comes from the userspace, it's not guaranteed to be in packet_bind_spkt()
3210 * zero-terminated. in packet_bind_spkt()
3212 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); in packet_bind_spkt()
3213 name[sizeof(uaddr->sa_data)] = 0; in packet_bind_spkt()
3215 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); in packet_bind_spkt()
3221 struct sock *sk = sock->sk; in packet_bind()
3228 return -EINVAL; in packet_bind()
3229 if (sll->sll_family != AF_PACKET) in packet_bind()
3230 return -EINVAL; in packet_bind()
3232 return packet_do_bind(sk, NULL, sll->sll_ifindex, in packet_bind()
3233 sll->sll_protocol ? : pkt_sk(sk)->num); in packet_bind()
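
Illustrative sketch (not part of af_packet.c) of the sockaddr_ll that
packet_bind() above decodes: attach the socket to one interface for all
protocols. The interface name is supplied by the caller; ETH_P_ALL is an
example protocol choice.

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <arpa/inet.h>

static int bind_to_iface(int fd, const char *ifname)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family   = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);	/* 0 keeps pkt_sk(sk)->num */
	sll.sll_ifindex  = if_nametoindex(ifname);
	return bind(fd, (struct sockaddr *)&sll, sizeof(sll));
}
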
3254 if (!ns_capable(net->user_ns, CAP_NET_RAW)) in packet_create()
3255 return -EPERM; in packet_create()
3256 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && in packet_create()
3257 sock->type != SOCK_PACKET) in packet_create()
3258 return -ESOCKTNOSUPPORT; in packet_create()
3260 sock->state = SS_UNCONNECTED; in packet_create()
3262 err = -ENOBUFS; in packet_create()
3267 sock->ops = &packet_ops; in packet_create()
3268 if (sock->type == SOCK_PACKET) in packet_create()
3269 sock->ops = &packet_ops_spkt; in packet_create()
3274 init_completion(&po->skb_completion); in packet_create()
3275 sk->sk_family = PF_PACKET; in packet_create()
3276 po->num = proto; in packet_create()
3277 po->xmit = dev_queue_xmit; in packet_create()
3285 sk->sk_destruct = packet_sock_destruct; in packet_create()
3292 spin_lock_init(&po->bind_lock); in packet_create()
3293 mutex_init(&po->pg_vec_lock); in packet_create()
3294 po->rollover = NULL; in packet_create()
3295 po->prot_hook.func = packet_rcv; in packet_create()
3297 if (sock->type == SOCK_PACKET) in packet_create()
3298 po->prot_hook.func = packet_rcv_spkt; in packet_create()
3300 po->prot_hook.af_packet_priv = sk; in packet_create()
3303 po->prot_hook.type = proto; in packet_create()
3307 mutex_lock(&net->packet.sklist_lock); in packet_create()
3308 sk_add_node_tail_rcu(sk, &net->packet.sklist); in packet_create()
3309 mutex_unlock(&net->packet.sklist_lock); in packet_create()
3330 struct sock *sk = sock->sk; in packet_recvmsg()
3336 err = -EINVAL; in packet_recvmsg()
3342 if (pkt_sk(sk)->ifindex < 0) in packet_recvmsg()
3343 return -ENODEV; in packet_recvmsg()
3354 * of horrible races and re-entrancy so we can forget about it in packet_recvmsg()
3374 if (pkt_sk(sk)->has_vnet_hdr) { in packet_recvmsg()
3385 copied = skb->len; in packet_recvmsg()
3388 msg->msg_flags |= MSG_TRUNC; in packet_recvmsg()
3395 if (sock->type != SOCK_PACKET) { in packet_recvmsg()
3396 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; in packet_recvmsg()
3399 origlen = PACKET_SKB_CB(skb)->sa.origlen; in packet_recvmsg()
3400 sll->sll_family = AF_PACKET; in packet_recvmsg()
3401 sll->sll_protocol = skb->protocol; in packet_recvmsg()
3406 if (msg->msg_name) { in packet_recvmsg()
3412 if (sock->type == SOCK_PACKET) { in packet_recvmsg()
3414 msg->msg_namelen = sizeof(struct sockaddr_pkt); in packet_recvmsg()
3415 copy_len = msg->msg_namelen; in packet_recvmsg()
3417 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; in packet_recvmsg()
3419 msg->msg_namelen = sll->sll_halen + in packet_recvmsg()
3421 copy_len = msg->msg_namelen; in packet_recvmsg()
3422 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) { in packet_recvmsg()
3423 memset(msg->msg_name + in packet_recvmsg()
3425 0, sizeof(sll->sll_addr)); in packet_recvmsg()
3426 msg->msg_namelen = sizeof(struct sockaddr_ll); in packet_recvmsg()
3429 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len); in packet_recvmsg()
3432 if (pkt_sk(sk)->auxdata) { in packet_recvmsg()
3436 if (skb->ip_summed == CHECKSUM_PARTIAL) in packet_recvmsg()
3438 else if (skb->pkt_type != PACKET_OUTGOING && in packet_recvmsg()
3439 (skb->ip_summed == CHECKSUM_COMPLETE || in packet_recvmsg()
3444 aux.tp_snaplen = skb->len; in packet_recvmsg()
3449 aux.tp_vlan_tpid = ntohs(skb->vlan_proto); in packet_recvmsg()
3460 * hides all the races and re-entrancy issues from us. in packet_recvmsg()
3462 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied); in packet_recvmsg()
3474 struct sock *sk = sock->sk; in packet_getname_spkt()
3477 return -EOPNOTSUPP; in packet_getname_spkt()
3479 uaddr->sa_family = AF_PACKET; in packet_getname_spkt()
3480 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); in packet_getname_spkt()
3482 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); in packet_getname_spkt()
3484 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); in packet_getname_spkt()
3494 struct sock *sk = sock->sk; in packet_getname()
3499 return -EOPNOTSUPP; in packet_getname()
3501 sll->sll_family = AF_PACKET; in packet_getname()
3502 sll->sll_ifindex = po->ifindex; in packet_getname()
3503 sll->sll_protocol = po->num; in packet_getname()
3504 sll->sll_pkttype = 0; in packet_getname()
3506 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); in packet_getname()
3508 sll->sll_hatype = dev->type; in packet_getname()
3509 sll->sll_halen = dev->addr_len; in packet_getname()
3510 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); in packet_getname()
3512 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ in packet_getname()
3513 sll->sll_halen = 0; in packet_getname()
3517 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; in packet_getname()
3523 switch (i->type) { in packet_dev_mc()
3525 if (i->alen != dev->addr_len) in packet_dev_mc()
3526 return -EINVAL; in packet_dev_mc()
3528 return dev_mc_add(dev, i->addr); in packet_dev_mc()
3530 return dev_mc_del(dev, i->addr); in packet_dev_mc()
3537 if (i->alen != dev->addr_len) in packet_dev_mc()
3538 return -EINVAL; in packet_dev_mc()
3540 return dev_uc_add(dev, i->addr); in packet_dev_mc()
3542 return dev_uc_del(dev, i->addr); in packet_dev_mc()
3556 if (ml->ifindex == dev->ifindex) { in packet_dev_mclist_delete()
3557 packet_dev_mc(dev, ml, -1); in packet_dev_mclist_delete()
3558 *mlp = ml->next; in packet_dev_mclist_delete()
3561 mlp = &ml->next; in packet_dev_mclist_delete()
3574 err = -ENODEV; in packet_mc_add()
3575 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); in packet_mc_add()
3579 err = -EINVAL; in packet_mc_add()
3580 if (mreq->mr_alen > dev->addr_len) in packet_mc_add()
3583 err = -ENOBUFS; in packet_mc_add()
3589 for (ml = po->mclist; ml; ml = ml->next) { in packet_mc_add()
3590 if (ml->ifindex == mreq->mr_ifindex && in packet_mc_add()
3591 ml->type == mreq->mr_type && in packet_mc_add()
3592 ml->alen == mreq->mr_alen && in packet_mc_add()
3593 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { in packet_mc_add()
3594 ml->count++; in packet_mc_add()
3601 i->type = mreq->mr_type; in packet_mc_add()
3602 i->ifindex = mreq->mr_ifindex; in packet_mc_add()
3603 i->alen = mreq->mr_alen; in packet_mc_add()
3604 memcpy(i->addr, mreq->mr_address, i->alen); in packet_mc_add()
3605 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); in packet_mc_add()
3606 i->count = 1; in packet_mc_add()
3607 i->next = po->mclist; in packet_mc_add()
3608 po->mclist = i; in packet_mc_add()
3611 po->mclist = i->next; in packet_mc_add()
3626 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { in packet_mc_drop()
3627 if (ml->ifindex == mreq->mr_ifindex && in packet_mc_drop()
3628 ml->type == mreq->mr_type && in packet_mc_drop()
3629 ml->alen == mreq->mr_alen && in packet_mc_drop()
3630 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { in packet_mc_drop()
3631 if (--ml->count == 0) { in packet_mc_drop()
3633 *mlp = ml->next; in packet_mc_drop()
3634 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); in packet_mc_drop()
3636 packet_dev_mc(dev, ml, -1); in packet_mc_drop()
3651 if (!po->mclist) in packet_flush_mclist()
3655 while ((ml = po->mclist) != NULL) { in packet_flush_mclist()
3658 po->mclist = ml->next; in packet_flush_mclist()
3659 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); in packet_flush_mclist()
3661 packet_dev_mc(dev, ml, -1); in packet_flush_mclist()
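
Illustrative sketch (not part of af_packet.c) of the option that reaches
packet_mc_add() above: put one interface into promiscuous mode for the
lifetime of the socket (packet_flush_mclist() undoes it on close).

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <net/if.h>

static int enable_promisc(int fd, const char *ifname)
{
	struct packet_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = if_nametoindex(ifname);
	mreq.mr_type    = PACKET_MR_PROMISC;
	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}
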
3671 struct sock *sk = sock->sk; in packet_setsockopt()
3676 return -ENOPROTOOPT; in packet_setsockopt()
3686 return -EINVAL; in packet_setsockopt()
3690 return -EFAULT; in packet_setsockopt()
3692 return -EINVAL; in packet_setsockopt()
3707 switch (po->tp_version) { in packet_setsockopt()
3718 ret = -EINVAL; in packet_setsockopt()
3721 ret = -EFAULT; in packet_setsockopt()
3734 return -EINVAL; in packet_setsockopt()
3736 return -EFAULT; in packet_setsockopt()
3738 pkt_sk(sk)->copy_thresh = val; in packet_setsockopt()
3746 return -EINVAL; in packet_setsockopt()
3748 return -EFAULT; in packet_setsockopt()
3755 return -EINVAL; in packet_setsockopt()
3758 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { in packet_setsockopt()
3759 ret = -EBUSY; in packet_setsockopt()
3761 po->tp_version = val; in packet_setsockopt()
3772 return -EINVAL; in packet_setsockopt()
3774 return -EFAULT; in packet_setsockopt()
3776 return -EINVAL; in packet_setsockopt()
3778 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { in packet_setsockopt()
3779 ret = -EBUSY; in packet_setsockopt()
3781 po->tp_reserve = val; in packet_setsockopt()
3792 return -EINVAL; in packet_setsockopt()
3794 return -EFAULT; in packet_setsockopt()
3797 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { in packet_setsockopt()
3798 ret = -EBUSY; in packet_setsockopt()
3800 po->tp_loss = !!val; in packet_setsockopt()
3811 return -EINVAL; in packet_setsockopt()
3813 return -EFAULT; in packet_setsockopt()
3816 po->auxdata = !!val; in packet_setsockopt()
3825 return -EINVAL; in packet_setsockopt()
3827 return -EFAULT; in packet_setsockopt()
3830 po->origdev = !!val; in packet_setsockopt()
3838 if (sock->type != SOCK_RAW) in packet_setsockopt()
3839 return -EINVAL; in packet_setsockopt()
3841 return -EINVAL; in packet_setsockopt()
3843 return -EFAULT; in packet_setsockopt()
3846 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { in packet_setsockopt()
3847 ret = -EBUSY; in packet_setsockopt()
3849 po->has_vnet_hdr = !!val; in packet_setsockopt()
3860 return -EINVAL; in packet_setsockopt()
3862 return -EFAULT; in packet_setsockopt()
3864 po->tp_tstamp = val; in packet_setsockopt()
3872 return -EINVAL; in packet_setsockopt()
3874 return -EFAULT; in packet_setsockopt()
3880 if (!po->fanout) in packet_setsockopt()
3881 return -EINVAL; in packet_setsockopt()
3890 return -EINVAL; in packet_setsockopt()
3892 return -EFAULT; in packet_setsockopt()
3894 return -EINVAL; in packet_setsockopt()
3896 po->prot_hook.ignore_outgoing = !!val; in packet_setsockopt()
3904 return -EINVAL; in packet_setsockopt()
3906 return -EFAULT; in packet_setsockopt()
3909 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { in packet_setsockopt()
3910 ret = -EBUSY; in packet_setsockopt()
3912 po->tp_tx_has_off = !!val; in packet_setsockopt()
3923 return -EINVAL; in packet_setsockopt()
3925 return -EFAULT; in packet_setsockopt()
3927 po->xmit = val ? packet_direct_xmit : dev_queue_xmit; in packet_setsockopt()
3931 return -ENOPROTOOPT; in packet_setsockopt()
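
Illustrative sketch (not part of af_packet.c) exercising the PACKET_VERSION
and PACKET_RX_RING cases above: select TPACKET_V3, size the ring, and mmap
it; packet_set_ring() at the end of this listing does the kernel side. The
geometry (1 MiB x 64 blocks, 2048-byte frame sizing) and the 60 ms retire
timeout are example values.

#include <string.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <linux/if_packet.h>

static void *setup_rx_ring_v3(int fd, struct tpacket_req3 *req)
{
	int ver = TPACKET_V3;
	void *ring;

	memset(req, 0, sizeof(*req));
	req->tp_block_size = 1 << 20;
	req->tp_block_nr   = 64;
	req->tp_frame_size = 2048;	/* V3 packs variably; sizes must agree */
	req->tp_frame_nr   = (req->tp_block_size / req->tp_frame_size) *
			     req->tp_block_nr;
	req->tp_retire_blk_tov = 60;	/* ms; 0 lets the kernel calculate */

	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)) ||
	    setsockopt(fd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req)))
		return NULL;
	ring = mmap(NULL, (size_t)req->tp_block_size * req->tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	return ring == MAP_FAILED ? NULL : ring;
}
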
3940 struct sock *sk = sock->sk; in packet_getsockopt()
3948 return -ENOPROTOOPT; in packet_getsockopt()
3951 return -EFAULT; in packet_getsockopt()
3954 return -EINVAL; in packet_getsockopt()
3958 spin_lock_bh(&sk->sk_receive_queue.lock); in packet_getsockopt()
3959 memcpy(&st, &po->stats, sizeof(st)); in packet_getsockopt()
3960 memset(&po->stats, 0, sizeof(po->stats)); in packet_getsockopt()
3961 spin_unlock_bh(&sk->sk_receive_queue.lock); in packet_getsockopt()
3962 drops = atomic_xchg(&po->tp_drops, 0); in packet_getsockopt()
3964 if (po->tp_version == TPACKET_V3) { in packet_getsockopt()
3978 val = po->auxdata; in packet_getsockopt()
3981 val = po->origdev; in packet_getsockopt()
3984 val = po->has_vnet_hdr; in packet_getsockopt()
3987 val = po->tp_version; in packet_getsockopt()
3993 return -EINVAL; in packet_getsockopt()
3995 return -EFAULT; in packet_getsockopt()
4007 return -EINVAL; in packet_getsockopt()
4011 val = po->tp_reserve; in packet_getsockopt()
4014 val = po->tp_loss; in packet_getsockopt()
4017 val = po->tp_tstamp; in packet_getsockopt()
4020 val = (po->fanout ? in packet_getsockopt()
4021 ((u32)po->fanout->id | in packet_getsockopt()
4022 ((u32)po->fanout->type << 16) | in packet_getsockopt()
4023 ((u32)po->fanout->flags << 24)) : in packet_getsockopt()
4027 val = po->prot_hook.ignore_outgoing; in packet_getsockopt()
4030 if (!po->rollover) in packet_getsockopt()
4031 return -EINVAL; in packet_getsockopt()
4032 rstats.tp_all = atomic_long_read(&po->rollover->num); in packet_getsockopt()
4033 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); in packet_getsockopt()
4034 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); in packet_getsockopt()
4039 val = po->tp_tx_has_off; in packet_getsockopt()
4045 return -ENOPROTOOPT; in packet_getsockopt()
4051 return -EFAULT; in packet_getsockopt()
4053 return -EFAULT; in packet_getsockopt()
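
The PACKET_STATISTICS branch above copies the counters under the receive-queue lock and then zeroes them, so reading the statistics also resets them. A hedged userspace sketch (sockets switched to TPACKET_V3 get struct tpacket_stats_v3 instead, which adds tp_freeze_q_cnt):

#include <linux/if_packet.h>
#include <sys/socket.h>
#include <stdio.h>

/* Sketch: read-and-reset the receive counters of a packet socket. */
static void dump_stats(int fd)
{
        struct tpacket_stats st;
        socklen_t len = sizeof(st);

        if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
                printf("packets=%u drops=%u\n", st.tp_packets, st.tp_drops);
}
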
4065 sk_for_each_rcu(sk, &net->packet.sklist) { in packet_notifier()
4070 if (po->mclist) in packet_notifier()
4071 packet_dev_mclist_delete(dev, &po->mclist); in packet_notifier()
4075 if (dev->ifindex == po->ifindex) { in packet_notifier()
4076 spin_lock(&po->bind_lock); in packet_notifier()
4077 if (po->running) { in packet_notifier()
4079 sk->sk_err = ENETDOWN; in packet_notifier()
4081 sk->sk_error_report(sk); in packet_notifier()
4085 po->ifindex = -1; in packet_notifier()
4086 if (po->prot_hook.dev) in packet_notifier()
4087 dev_put(po->prot_hook.dev); in packet_notifier()
4088 po->prot_hook.dev = NULL; in packet_notifier()
4090 spin_unlock(&po->bind_lock); in packet_notifier()
4094 if (dev->ifindex == po->ifindex) { in packet_notifier()
4095 spin_lock(&po->bind_lock); in packet_notifier()
4096 if (po->num) in packet_notifier()
4098 spin_unlock(&po->bind_lock); in packet_notifier()
4111 struct sock *sk = sock->sk; in packet_ioctl()
4125 spin_lock_bh(&sk->sk_receive_queue.lock); in packet_ioctl()
4126 skb = skb_peek(&sk->sk_receive_queue); in packet_ioctl()
4128 amount = skb->len; in packet_ioctl()
4129 spin_unlock_bh(&sk->sk_receive_queue.lock); in packet_ioctl()
4151 return -ENOIOCTLCMD; in packet_ioctl()
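
The receive-queue peek above returns the length of the skb at the head of the queue, i.e. on packet sockets the queue-size ioctl reports the size of the next pending packet rather than the total bytes queued. A small sketch, assuming SIOCINQ is the ioctl in question:

#include <sys/ioctl.h>
#include <linux/sockios.h>

/* Sketch: returns the length of the next queued packet, or 0 if none. */
static int next_packet_len(int fd)
{
        int amount = 0;

        ioctl(fd, SIOCINQ, &amount);
        return amount;
}
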
4159 struct sock *sk = sock->sk; in packet_poll()
4163 spin_lock_bh(&sk->sk_receive_queue.lock); in packet_poll()
4164 if (po->rx_ring.pg_vec) { in packet_poll()
4165 if (!packet_previous_rx_frame(po, &po->rx_ring, in packet_poll()
4170 spin_unlock_bh(&sk->sk_receive_queue.lock); in packet_poll()
4171 spin_lock_bh(&sk->sk_write_queue.lock); in packet_poll()
4172 if (po->tx_ring.pg_vec) { in packet_poll()
4173 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) in packet_poll()
4176 spin_unlock_bh(&sk->sk_write_queue.lock); in packet_poll()
4187 struct file *file = vma->vm_file; in packet_mm_open()
4188 struct socket *sock = file->private_data; in packet_mm_open()
4189 struct sock *sk = sock->sk; in packet_mm_open()
4192 atomic_inc(&pkt_sk(sk)->mapped); in packet_mm_open()
4197 struct file *file = vma->vm_file; in packet_mm_close()
4198 struct socket *sock = file->private_data; in packet_mm_close()
4199 struct sock *sk = sock->sk; in packet_mm_close()
4202 atomic_dec(&pkt_sk(sk)->mapped); in packet_mm_close()
4243 /* vmalloc failed, let's dig into swap here */ in alloc_one_pg_vec_page()
4255 unsigned int block_nr = req->tp_block_nr; in alloc_pg_vec()
4290 struct tpacket_req *req = &req_u->req; in packet_set_ring()
4292 rb = tx_ring ? &po->tx_ring : &po->rx_ring; in packet_set_ring()
4293 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; in packet_set_ring()
4295 err = -EBUSY; in packet_set_ring()
4297 if (atomic_read(&po->mapped)) in packet_set_ring()
4303 if (req->tp_block_nr) { in packet_set_ring()
4307 err = -EBUSY; in packet_set_ring()
4308 if (unlikely(rb->pg_vec)) in packet_set_ring()
4311 switch (po->tp_version) { in packet_set_ring()
4313 po->tp_hdrlen = TPACKET_HDRLEN; in packet_set_ring()
4316 po->tp_hdrlen = TPACKET2_HDRLEN; in packet_set_ring()
4319 po->tp_hdrlen = TPACKET3_HDRLEN; in packet_set_ring()
4323 err = -EINVAL; in packet_set_ring()
4324 if (unlikely((int)req->tp_block_size <= 0)) in packet_set_ring()
4326 if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) in packet_set_ring()
4328 min_frame_size = po->tp_hdrlen + po->tp_reserve; in packet_set_ring()
4329 if (po->tp_version >= TPACKET_V3 && in packet_set_ring()
4330 req->tp_block_size < in packet_set_ring()
4331 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size) in packet_set_ring()
4333 if (unlikely(req->tp_frame_size < min_frame_size)) in packet_set_ring()
4335 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) in packet_set_ring()
4338 rb->frames_per_block = req->tp_block_size / req->tp_frame_size; in packet_set_ring()
4339 if (unlikely(rb->frames_per_block == 0)) in packet_set_ring()
4341 if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr)) in packet_set_ring()
4343 if (unlikely((rb->frames_per_block * req->tp_block_nr) != in packet_set_ring()
4344 req->tp_frame_nr)) in packet_set_ring()
4347 err = -ENOMEM; in packet_set_ring()
4348 order = get_order(req->tp_block_size); in packet_set_ring()
4352 switch (po->tp_version) { in packet_set_ring()
4358 struct tpacket_req3 *req3 = &req_u->req3; in packet_set_ring()
4360 if (req3->tp_retire_blk_tov || in packet_set_ring()
4361 req3->tp_sizeof_priv || in packet_set_ring()
4362 req3->tp_feature_req_word) { in packet_set_ring()
4363 err = -EINVAL; in packet_set_ring()
4370 rx_owner_map = bitmap_alloc(req->tp_frame_nr, in packet_set_ring()
4380 err = -EINVAL; in packet_set_ring()
4381 if (unlikely(req->tp_frame_nr)) in packet_set_ring()
4387 spin_lock(&po->bind_lock); in packet_set_ring()
4388 was_running = po->running; in packet_set_ring()
4389 num = po->num; in packet_set_ring()
4391 po->num = 0; in packet_set_ring()
4394 spin_unlock(&po->bind_lock); in packet_set_ring()
4398 err = -EBUSY; in packet_set_ring()
4399 mutex_lock(&po->pg_vec_lock); in packet_set_ring()
4400 if (closing || atomic_read(&po->mapped) == 0) { in packet_set_ring()
4402 spin_lock_bh(&rb_queue->lock); in packet_set_ring()
4403 swap(rb->pg_vec, pg_vec); in packet_set_ring()
4404 if (po->tp_version <= TPACKET_V2) in packet_set_ring()
4405 swap(rb->rx_owner_map, rx_owner_map); in packet_set_ring()
4406 rb->frame_max = (req->tp_frame_nr - 1); in packet_set_ring()
4407 rb->head = 0; in packet_set_ring()
4408 rb->frame_size = req->tp_frame_size; in packet_set_ring()
4409 spin_unlock_bh(&rb_queue->lock); in packet_set_ring()
4411 swap(rb->pg_vec_order, order); in packet_set_ring()
4412 swap(rb->pg_vec_len, req->tp_block_nr); in packet_set_ring()
4414 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; in packet_set_ring()
4415 po->prot_hook.func = (po->rx_ring.pg_vec) ? in packet_set_ring()
4418 if (atomic_read(&po->mapped)) in packet_set_ring()
4420 atomic_read(&po->mapped)); in packet_set_ring()
4422 mutex_unlock(&po->pg_vec_lock); in packet_set_ring()
4424 spin_lock(&po->bind_lock); in packet_set_ring()
4426 po->num = num; in packet_set_ring()
4429 spin_unlock(&po->bind_lock); in packet_set_ring()
4430 if (pg_vec && (po->tp_version > TPACKET_V2)) { in packet_set_ring()
4431 /* Because we don't support block-based V3 on the tx ring */ in packet_set_ring()
4439 free_pg_vec(pg_vec, order, req->tp_block_nr); in packet_set_ring()
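
The checks in packet_set_ring() above pin down the ring geometry userspace may request: a positive, page-aligned block size, a frame size of at least tp_hdrlen + tp_reserve aligned to TPACKET_ALIGNMENT, and tp_frame_nr equal to frames-per-block times tp_block_nr. A sketch of a request that satisfies those checks (the sizes are arbitrary assumptions, not recommendations):

#include <linux/if_packet.h>
#include <sys/socket.h>

/* Sketch: a TPACKET_V2 rx ring request that passes the geometry checks in
 * packet_set_ring(). Assumes PACKET_VERSION was already set to TPACKET_V2.
 */
static int setup_rx_ring(int fd, struct tpacket_req *req)
{
        req->tp_block_size = 1 << 16;   /* 64 KiB, a multiple of PAGE_SIZE */
        req->tp_block_nr   = 64;
        req->tp_frame_size = 1 << 11;   /* 2 KiB, TPACKET_ALIGNMENT-aligned */
        req->tp_frame_nr   = (req->tp_block_size / req->tp_frame_size) *
                             req->tp_block_nr;

        return setsockopt(fd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));
}
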
4447 struct sock *sk = sock->sk; in packet_mmap()
4452 int err = -EINVAL; in packet_mmap()
4455 if (vma->vm_pgoff) in packet_mmap()
4456 return -EINVAL; in packet_mmap()
4458 mutex_lock(&po->pg_vec_lock); in packet_mmap()
4461 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { in packet_mmap()
4462 if (rb->pg_vec) { in packet_mmap()
4463 expected_size += rb->pg_vec_len in packet_mmap()
4464 * rb->pg_vec_pages in packet_mmap()
4472 size = vma->vm_end - vma->vm_start; in packet_mmap()
4476 start = vma->vm_start; in packet_mmap()
4477 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { in packet_mmap()
4478 if (rb->pg_vec == NULL) in packet_mmap()
4481 for (i = 0; i < rb->pg_vec_len; i++) { in packet_mmap()
4483 void *kaddr = rb->pg_vec[i].buffer; in packet_mmap()
4486 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) { in packet_mmap()
4497 atomic_inc(&po->mapped); in packet_mmap()
4498 vma->vm_ops = &packet_mmap_ops; in packet_mmap()
4502 mutex_unlock(&po->pg_vec_lock); in packet_mmap()
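
packet_mmap() above only accepts a mapping that starts at page offset 0 and whose length equals the combined size of the rx and tx rings. A hedged sketch that maps a TPACKET_V2 rx ring like the one requested in the earlier sketch, waits with poll(), and hands one frame back to the kernel:

#include <linux/if_packet.h>
#include <sys/mman.h>
#include <poll.h>

/* Sketch: map the rx ring and consume a single frame. Only the first frame
 * of block 0 is examined here; a real reader would walk the frames in ring
 * order. Error handling is omitted.
 */
static void consume_one(int fd, const struct tpacket_req *req)
{
        size_t len = (size_t)req->tp_block_size * req->tp_block_nr;
        void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        struct tpacket2_hdr *hdr = ring;        /* first frame of block 0 */
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        poll(&pfd, 1, -1);
        if (hdr->tp_status & TP_STATUS_USER) {
                /* packet data starts at (char *)hdr + hdr->tp_mac, tp_snaplen bytes */
                hdr->tp_status = TP_STATUS_KERNEL;      /* hand the frame back */
        }
        munmap(ring, len);
}
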
4566 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos); in packet_seq_start()
4572 return seq_hlist_next_rcu(v, &net->packet.sklist, pos); in packet_seq_next()
4590 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", in packet_seq_show()
4592 refcount_read(&s->sk_refcnt), in packet_seq_show()
4593 s->sk_type, in packet_seq_show()
4594 ntohs(po->num), in packet_seq_show()
4595 po->ifindex, in packet_seq_show()
4596 po->running, in packet_seq_show()
4597 atomic_read(&s->sk_rmem_alloc), in packet_seq_show()
4615 mutex_init(&net->packet.sklist_lock); in packet_net_init()
4616 INIT_HLIST_HEAD(&net->packet.sklist); in packet_net_init()
4618 if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops, in packet_net_init()
4620 return -ENOMEM; in packet_net_init()
4627 remove_proc_entry("packet", net->proc_net); in packet_net_exit()
4628 WARN_ON_ONCE(!hlist_empty(&net->packet.sklist)); in packet_net_exit()