Lines matching refs: vp (UML vector network driver, arch/um/drivers/vector_kern.c)

101 static void vector_reset_stats(struct vector_private *vp)  in vector_reset_stats()  argument
103 vp->estats.rx_queue_max = 0; in vector_reset_stats()
104 vp->estats.rx_queue_running_average = 0; in vector_reset_stats()
105 vp->estats.tx_queue_max = 0; in vector_reset_stats()
106 vp->estats.tx_queue_running_average = 0; in vector_reset_stats()
107 vp->estats.rx_encaps_errors = 0; in vector_reset_stats()
108 vp->estats.tx_timeout_count = 0; in vector_reset_stats()
109 vp->estats.tx_restart_queue = 0; in vector_reset_stats()
110 vp->estats.tx_kicks = 0; in vector_reset_stats()
111 vp->estats.tx_flow_control_xon = 0; in vector_reset_stats()
112 vp->estats.tx_flow_control_xoff = 0; in vector_reset_stats()
113 vp->estats.sg_ok = 0; in vector_reset_stats()
114 vp->estats.sg_linearized = 0; in vector_reset_stats()
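
Note: the *_queue_running_average fields reset above are maintained as a cheap smoothed average: each new queue-depth sample is folded in as (avg + sample) >> 1, an exponentially weighted moving average with weight 1/2 (see the shifts in vector_send() and vector_mmsg_rx() below), so zeroing the fields simply restarts the estimate. A minimal runnable sketch of the same pattern, with illustrative names:

#include <stdio.h>

/* EWMA with weight 1/2, as used for the *_queue_running_average stats:
 * new_avg = (old_avg + sample) / 2, done with a shift. */
static unsigned long fold_sample(unsigned long avg, unsigned long sample)
{
        return (avg + sample) >> 1;
}

int main(void)
{
        unsigned long avg = 0, max = 0;
        unsigned long samples[] = { 4, 8, 8, 2, 16 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                if (samples[i] > max)
                        max = samples[i];       /* rx/tx_queue_max analogue */
                avg = fold_sample(avg, samples[i]);
        }
        printf("max=%lu running_average=%lu\n", max, avg);
        return 0;
}
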
264 static int prep_msg(struct vector_private *vp, in prep_msg() argument
277 if (vp->header_size > 0) { in prep_msg()
278 iov[iov_index].iov_len = vp->header_size; in prep_msg()
279 vp->form_header(iov[iov_index].iov_base, skb, vp); in prep_msg()
285 vp->estats.sg_ok++; in prep_msg()
307 struct vector_private *vp = netdev_priv(qi->dev); in vector_enqueue() local
326 vp, in vector_enqueue()
333 mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr; in vector_enqueue()
334 mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size; in vector_enqueue()
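
Note: prep_msg() fills an iovec array whose slot 0 is reserved for the transport header when vp->header_size > 0 (vp->form_header() writes into it), and vector_enqueue() then stamps the remote address from vp->fds into the msghdr. A hedged userspace analogue of assembling such a header-plus-payload message (names and the socketpair transport are invented for illustration):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Build a msghdr whose iov[0] is the encapsulation header and iov[1] the
 * payload, mirroring the header_size > 0 path of prep_msg(). */
static void build_msg(struct msghdr *mh, struct iovec *iov,
                      void *hdr, size_t hdr_len,
                      void *payload, size_t payload_len)
{
        memset(mh, 0, sizeof(*mh));
        iov[0].iov_base = hdr;
        iov[0].iov_len  = hdr_len;
        iov[1].iov_base = payload;
        iov[1].iov_len  = payload_len;
        mh->msg_iov     = iov;
        mh->msg_iovlen  = 2;
        /* msg_name/msg_namelen stay NULL/0 on a connected socketpair;
         * vector_enqueue() fills them with fds->remote_addr instead. */
}

int main(void)
{
        int sv[2];
        char hdr[4] = "VHDR", payload[] = "data";
        struct iovec iov[2];
        struct msghdr mh;
        char rx[64];

        socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
        build_msg(&mh, iov, hdr, sizeof(hdr), payload, sizeof(payload) - 1);
        sendmsg(sv[0], &mh, 0);
        printf("received %zd bytes\n", recv(sv[1], rx, sizeof(rx), 0));
        return 0;
}
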
381 struct vector_private *vp = netdev_priv(qi->dev); in vector_send() local
401 vp->fds->tx_fd, in vector_send()
406 vp->in_write_poll = in vector_send()
416 netdev_err(vp->dev, "sendmmsg err=%i\n", in vector_send()
427 if (result > vp->estats.tx_queue_max) in vector_send()
428 vp->estats.tx_queue_max = result; in vector_send()
429 vp->estats.tx_queue_running_average = in vector_send()
430 (vp->estats.tx_queue_running_average + result) >> 1; in vector_send()
438 vp->estats.tx_restart_queue++; in vector_send()
445 tasklet_schedule(&vp->tx_poll); in vector_send()
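
Note: vector_send() drains the queued mmsg vector with one sendmmsg() call on vp->fds->tx_fd (the "sendmmsg err" message above confirms the syscall), tracks tx_queue_max and the running average from the returned count, and on a short send bumps tx_restart_queue and reschedules the TX tasklet. A runnable userspace sketch of the batching syscall itself (UDP to the loopback discard port, purely for illustration):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in dst = {
                .sin_family = AF_INET,
                .sin_port   = htons(9),                 /* discard port */
                .sin_addr   = { htonl(INADDR_LOOPBACK) },
        };
        char a[] = "first", b[] = "second";
        struct iovec iov[2] = {
                { .iov_base = a, .iov_len = sizeof(a) },
                { .iov_base = b, .iov_len = sizeof(b) },
        };
        struct mmsghdr mmsg[2];

        memset(mmsg, 0, sizeof(mmsg));
        for (int i = 0; i < 2; i++) {
                mmsg[i].msg_hdr.msg_iov     = &iov[i];
                mmsg[i].msg_hdr.msg_iovlen  = 1;
                mmsg[i].msg_hdr.msg_name    = &dst;
                mmsg[i].msg_hdr.msg_namelen = sizeof(dst);
        }
        /* One syscall, many datagrams; a short return count is the case
         * the driver handles via tx_restart_queue and the tasklet. */
        int sent = sendmmsg(fd, mmsg, 2, 0);
        printf("sent %d of 2\n", sent);
        return 0;
}
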
458 struct vector_private *vp = netdev_priv(qi->dev); in destroy_queue() local
479 if ((vp->header_size > 0) && in destroy_queue()
495 struct vector_private *vp, in create_queue() argument
509 result->dev = vp->dev; in create_queue()
533 if (vp->header_size > 0) in create_queue()
552 if (vp->header_size > 0) { in create_queue()
590 struct vector_private *vp, in prep_skb() argument
593 int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN; in prep_skb()
600 if (vp->req_size <= linear) in prep_skb()
603 len = vp->req_size; in prep_skb()
606 len - vp->max_packet, in prep_skb()
611 if (vp->header_size > 0) in prep_skb()
618 skb_reserve(result, vp->headroom); in prep_skb()
619 result->dev = vp->dev; in prep_skb()
620 skb_put(result, vp->max_packet); in prep_skb()
621 result->data_len = len - vp->max_packet; in prep_skb()
622 result->len += len - vp->max_packet; in prep_skb()
626 iov[iov_index].iov_len = vp->max_packet; in prep_skb()
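
Note: prep_skb() sizes the linear area as max_packet + headroom + SAFETY_MARGIN; when vp->req_size exceeds that (vector_set_features() below raises it to 65536 when offloads are on), the remainder beyond max_packet is accounted as paged data via data_len. A small sketch of that arithmetic, assuming SAFETY_MARGIN is a small pad constant (the value here is illustrative, not taken from the driver):

#include <stdio.h>

#define SAFETY_MARGIN 32   /* assumption: small pad, value illustrative */

/* Mirror prep_skb()'s split between the linear area and paged data. */
static void size_rx_buffer(int max_packet, int headroom, int req_size)
{
        int linear = max_packet + headroom + SAFETY_MARGIN;
        int len = (req_size <= linear) ? linear : req_size;

        printf("linear=%d total=%d data_len=%d\n",
               linear, len, len - max_packet);
}

int main(void)
{
        size_rx_buffer(1522, 2, 0);       /* plain MTU-sized receive */
        size_rx_buffer(1522, 2, 65536);   /* req_size raised for offloads */
        return 0;
}
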
649 struct vector_private *vp = netdev_priv(qi->dev); in prep_queue_for_rx() local
662 *skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr); in prep_queue_for_rx()
770 struct vector_private *vp; in vector_remove() local
776 vp = netdev_priv(dev); in vector_remove()
777 if (vp->fds != NULL) in vector_remove()
811 static int vector_legacy_rx(struct vector_private *vp) in vector_legacy_rx() argument
827 if (vp->header_size > 0) { in vector_legacy_rx()
828 iov[0].iov_base = vp->header_rxbuffer; in vector_legacy_rx()
829 iov[0].iov_len = vp->header_size; in vector_legacy_rx()
832 skb = prep_skb(vp, &hdr); in vector_legacy_rx()
841 vp->dev->stats.rx_dropped++; in vector_legacy_rx()
844 pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0); in vector_legacy_rx()
847 if (pkt_len > vp->header_size) { in vector_legacy_rx()
848 if (vp->header_size > 0) { in vector_legacy_rx()
849 header_check = vp->verify_header( in vector_legacy_rx()
850 vp->header_rxbuffer, skb, vp); in vector_legacy_rx()
853 vp->dev->stats.rx_dropped++; in vector_legacy_rx()
854 vp->estats.rx_encaps_errors++; in vector_legacy_rx()
858 vp->estats.rx_csum_offload_good++; in vector_legacy_rx()
862 pskb_trim(skb, pkt_len - vp->rx_header_size); in vector_legacy_rx()
864 vp->dev->stats.rx_bytes += skb->len; in vector_legacy_rx()
865 vp->dev->stats.rx_packets++; in vector_legacy_rx()
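
Note: the legacy receive path reads one packet per recvmsg() call, pointing iov[0] at a separate header buffer when the transport encapsulates (vp->header_size > 0), then verifies the header and trims it off the skb. A runnable userspace sketch of the two-slot scatter read, using a socketpair as a stand-in for the transport fd:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

int main(void)
{
        int sv[2];
        char wire[] = "HDR4payload";          /* 4-byte header + data */
        char hdr[4], body[64];
        struct iovec iov[2] = {
                { .iov_base = hdr,  .iov_len = sizeof(hdr)  },
                { .iov_base = body, .iov_len = sizeof(body) },
        };
        struct msghdr mh = { .msg_iov = iov, .msg_iovlen = 2 };

        socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
        send(sv[0], wire, sizeof(wire) - 1, 0);

        /* The kernel scatters the first 4 bytes into hdr, the rest into
         * body, just as vector_legacy_rx() splits header and packet. */
        ssize_t pkt_len = recvmsg(sv[1], &mh, 0);
        if (pkt_len > (ssize_t)sizeof(hdr))   /* pkt_len > header_size */
                printf("header=%.4s payload=%.*s\n",
                       hdr, (int)(pkt_len - sizeof(hdr)), body);
        return 0;
}
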
881 static int writev_tx(struct vector_private *vp, struct sk_buff *skb) in writev_tx() argument
886 iov[0].iov_base = vp->header_txbuffer; in writev_tx()
887 iov_count = prep_msg(vp, skb, (struct iovec *) &iov); in writev_tx()
892 vp->fds->tx_fd, in writev_tx()
897 netif_trans_update(vp->dev); in writev_tx()
898 netif_wake_queue(vp->dev); in writev_tx()
901 vp->dev->stats.tx_bytes += skb->len; in writev_tx()
902 vp->dev->stats.tx_packets++; in writev_tx()
904 vp->dev->stats.tx_dropped++; in writev_tx()
909 vp->dev->stats.tx_dropped++; in writev_tx()
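
Note: writev_tx() is the non-batched transmit path: prep_msg() builds the iovec with the header in slot 0 (from header_txbuffer), a single gather write on tx_fd pushes it out (the function name suggests writev() under the hood), and tx_bytes/tx_packets or tx_dropped are updated accordingly. A runnable sketch of gathering header plus payload in one writev() call:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

int main(void)
{
        int sv[2];
        char hdr[4] = "HDR0";                 /* header_txbuffer analogue */
        char payload[] = "packet bytes";
        struct iovec iov[2] = {
                { .iov_base = hdr,     .iov_len = sizeof(hdr)         },
                { .iov_base = payload, .iov_len = sizeof(payload) - 1 },
        };
        char rx[64];

        socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
        /* One gather write emits header + payload as a single datagram. */
        ssize_t n = writev(sv[0], iov, 2);
        printf("wrote %zd bytes\n", n);
        printf("read %zd bytes back\n", recv(sv[1], rx, sizeof(rx), 0));
        return 0;
}
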
919 static int vector_mmsg_rx(struct vector_private *vp) in vector_mmsg_rx() argument
922 struct vector_queue *qi = vp->rx_queue; in vector_mmsg_rx()
937 vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0); in vector_mmsg_rx()
951 if (mmsg_vector->msg_len > vp->header_size) { in vector_mmsg_rx()
952 if (vp->header_size > 0) { in vector_mmsg_rx()
953 header_check = vp->verify_header( in vector_mmsg_rx()
956 vp in vector_mmsg_rx()
965 vp->estats.rx_encaps_errors++; in vector_mmsg_rx()
969 vp->estats.rx_csum_offload_good++; in vector_mmsg_rx()
974 mmsg_vector->msg_len - vp->rx_header_size); in vector_mmsg_rx()
980 vp->dev->stats.rx_bytes += skb->len; in vector_mmsg_rx()
981 vp->dev->stats.rx_packets++; in vector_mmsg_rx()
997 if (vp->estats.rx_queue_max < packet_count) in vector_mmsg_rx()
998 vp->estats.rx_queue_max = packet_count; in vector_mmsg_rx()
999 vp->estats.rx_queue_running_average = in vector_mmsg_rx()
1000 (vp->estats.rx_queue_running_average + packet_count) >> 1; in vector_mmsg_rx()
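
Note: vector_mmsg_rx() pulls a whole burst with one recvmmsg() call against the pre-filled ring (uml_vector_recvmmsg on rx_fd with qi->max_depth slots, prepared by prep_queue_for_rx() above), then walks the per-packet msg_len values, verifying and trimming headers, and folds packet_count into the same max/running-average stats as the TX side. A runnable userspace sketch of the batched receive:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#define DEPTH 4

int main(void)
{
        int sv[2];
        char bufs[DEPTH][64];
        struct iovec iov[DEPTH];
        struct mmsghdr mmsg[DEPTH];

        socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
        send(sv[0], "one", 3, 0);
        send(sv[0], "two", 3, 0);

        memset(mmsg, 0, sizeof(mmsg));
        for (int i = 0; i < DEPTH; i++) {     /* cf. prep_queue_for_rx() */
                iov[i].iov_base = bufs[i];
                iov[i].iov_len  = sizeof(bufs[i]);
                mmsg[i].msg_hdr.msg_iov    = &iov[i];
                mmsg[i].msg_hdr.msg_iovlen = 1;
        }
        /* One syscall drains up to DEPTH datagrams; msg_len is filled in
         * per packet, which is what the driver compares to header_size. */
        int packet_count = recvmmsg(sv[1], mmsg, DEPTH, MSG_DONTWAIT, NULL);
        for (int i = 0; i < packet_count; i++)
                printf("pkt %d: %u bytes\n", i, mmsg[i].msg_len);
        return 0;
}
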
1005 static void vector_rx(struct vector_private *vp) in vector_rx() argument
1009 if ((vp->options & VECTOR_RX) > 0) in vector_rx()
1010 while ((err = vector_mmsg_rx(vp)) > 0) in vector_rx()
1013 while ((err = vector_legacy_rx(vp)) > 0) in vector_rx()
1016 netdev_err(vp->dev, "vector_rx: error(%d)\n", err); in vector_rx()
1021 struct vector_private *vp = netdev_priv(dev); in vector_net_start_xmit() local
1024 if ((vp->options & VECTOR_TX) == 0) { in vector_net_start_xmit()
1025 writev_tx(vp, skb); in vector_net_start_xmit()
1033 netdev_sent_queue(vp->dev, skb->len); in vector_net_start_xmit()
1034 queue_depth = vector_enqueue(vp->tx_queue, skb); in vector_net_start_xmit()
1040 if (queue_depth >= vp->tx_queue->max_depth - 1) { in vector_net_start_xmit()
1041 vp->estats.tx_kicks++; in vector_net_start_xmit()
1043 vector_send(vp->tx_queue); in vector_net_start_xmit()
1047 mod_timer(&vp->tl, vp->coalesce); in vector_net_start_xmit()
1051 vp->estats.tx_kicks++; in vector_net_start_xmit()
1052 vector_send(vp->tx_queue); in vector_net_start_xmit()
1054 tasklet_schedule(&vp->tx_poll); in vector_net_start_xmit()
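
Note: vector_net_start_xmit() enqueues the skb and then decides when to flush: it kicks vector_send() immediately when the queue is nearly full (queue_depth >= max_depth - 1) or when no further packets are expected, and otherwise arms the coalescing timer (mod_timer with vp->coalesce) so a later packet or vector_timer_expire() triggers the send. A small illustrative sketch of that policy, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

/* Flush policy modelled on vector_net_start_xmit(); names and the
 * string results are hypothetical, for illustration only. */
static const char *tx_decision(int queue_depth, int max_depth,
                               bool more_coming)
{
        if (queue_depth >= max_depth - 1)
                return "kick now: queue nearly full";
        if (more_coming)
                return "arm coalesce timer, wait for a batch";
        return "kick now: no more packets expected";
}

int main(void)
{
        printf("%s\n", tx_decision(63, 64, true));
        printf("%s\n", tx_decision(10, 64, true));
        printf("%s\n", tx_decision(10, 64, false));
        return 0;
}
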
1061 struct vector_private *vp = netdev_priv(dev); in vector_rx_interrupt() local
1065 vector_rx(vp); in vector_rx_interrupt()
1073 struct vector_private *vp = netdev_priv(dev); in vector_tx_interrupt() local
1084 if (vp->in_write_poll) in vector_tx_interrupt()
1085 tasklet_schedule(&vp->tx_poll); in vector_tx_interrupt()
1094 struct vector_private *vp = netdev_priv(dev); in vector_net_close() local
1098 del_timer(&vp->tl); in vector_net_close()
1100 if (vp->fds == NULL) in vector_net_close()
1104 if (vp->rx_irq > 0) { in vector_net_close()
1105 um_free_irq(vp->rx_irq, dev); in vector_net_close()
1106 vp->rx_irq = 0; in vector_net_close()
1108 if (vp->tx_irq > 0) { in vector_net_close()
1109 um_free_irq(vp->tx_irq, dev); in vector_net_close()
1110 vp->tx_irq = 0; in vector_net_close()
1112 tasklet_kill(&vp->tx_poll); in vector_net_close()
1113 if (vp->fds->rx_fd > 0) { in vector_net_close()
1114 os_close_file(vp->fds->rx_fd); in vector_net_close()
1115 vp->fds->rx_fd = -1; in vector_net_close()
1117 if (vp->fds->tx_fd > 0) { in vector_net_close()
1118 os_close_file(vp->fds->tx_fd); in vector_net_close()
1119 vp->fds->tx_fd = -1; in vector_net_close()
1121 if (vp->bpf != NULL) in vector_net_close()
1122 kfree(vp->bpf); in vector_net_close()
1123 if (vp->fds->remote_addr != NULL) in vector_net_close()
1124 kfree(vp->fds->remote_addr); in vector_net_close()
1125 if (vp->transport_data != NULL) in vector_net_close()
1126 kfree(vp->transport_data); in vector_net_close()
1127 if (vp->header_rxbuffer != NULL) in vector_net_close()
1128 kfree(vp->header_rxbuffer); in vector_net_close()
1129 if (vp->header_txbuffer != NULL) in vector_net_close()
1130 kfree(vp->header_txbuffer); in vector_net_close()
1131 if (vp->rx_queue != NULL) in vector_net_close()
1132 destroy_queue(vp->rx_queue); in vector_net_close()
1133 if (vp->tx_queue != NULL) in vector_net_close()
1134 destroy_queue(vp->tx_queue); in vector_net_close()
1135 kfree(vp->fds); in vector_net_close()
1136 vp->fds = NULL; in vector_net_close()
1137 spin_lock_irqsave(&vp->lock, flags); in vector_net_close()
1138 vp->opened = false; in vector_net_close()
1139 spin_unlock_irqrestore(&vp->lock, flags); in vector_net_close()
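
Note on the teardown above: kfree(NULL), like userspace free(NULL), is defined to be a no-op, so the if (x != NULL) guards before each kfree() are redundant; calling it unconditionally is the usual kernel idiom. A runnable sketch of the same idiom in plain C, using free() so it builds outside the kernel:

#include <stdlib.h>

struct bufs {
        void *bpf;
        void *header_rxbuffer;
        void *header_txbuffer;
};

/* free(NULL)/kfree(NULL) are no-ops, so no NULL checks are needed. */
static void teardown(struct bufs *b)
{
        free(b->bpf);
        free(b->header_rxbuffer);
        free(b->header_txbuffer);
        b->bpf = b->header_rxbuffer = b->header_txbuffer = NULL;
}

int main(void)
{
        struct bufs b = { 0 };

        b.bpf = malloc(16);
        teardown(&b);   /* safe even though two pointers were never set */
        return 0;
}
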
1147 struct vector_private *vp = (struct vector_private *)data; in vector_tx_poll() local
1149 vp->estats.tx_kicks++; in vector_tx_poll()
1150 vector_send(vp->tx_queue); in vector_tx_poll()
1154 struct vector_private *vp = in vector_reset_tx() local
1156 netdev_reset_queue(vp->dev); in vector_reset_tx()
1157 netif_start_queue(vp->dev); in vector_reset_tx()
1158 netif_wake_queue(vp->dev); in vector_reset_tx()
1162 struct vector_private *vp = netdev_priv(dev); in vector_net_open() local
1167 spin_lock_irqsave(&vp->lock, flags); in vector_net_open()
1168 if (vp->opened) { in vector_net_open()
1169 spin_unlock_irqrestore(&vp->lock, flags); in vector_net_open()
1172 vp->opened = true; in vector_net_open()
1173 spin_unlock_irqrestore(&vp->lock, flags); in vector_net_open()
1175 vp->fds = uml_vector_user_open(vp->unit, vp->parsed); in vector_net_open()
1177 if (vp->fds == NULL) in vector_net_open()
1180 if (build_transport_data(vp) < 0) in vector_net_open()
1183 if ((vp->options & VECTOR_RX) > 0) { in vector_net_open()
1184 vp->rx_queue = create_queue( in vector_net_open()
1185 vp, in vector_net_open()
1186 get_depth(vp->parsed), in vector_net_open()
1187 vp->rx_header_size, in vector_net_open()
1190 vp->rx_queue->queue_depth = get_depth(vp->parsed); in vector_net_open()
1192 vp->header_rxbuffer = kmalloc( in vector_net_open()
1193 vp->rx_header_size, in vector_net_open()
1196 if (vp->header_rxbuffer == NULL) in vector_net_open()
1199 if ((vp->options & VECTOR_TX) > 0) { in vector_net_open()
1200 vp->tx_queue = create_queue( in vector_net_open()
1201 vp, in vector_net_open()
1202 get_depth(vp->parsed), in vector_net_open()
1203 vp->header_size, in vector_net_open()
1207 vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL); in vector_net_open()
1208 if (vp->header_txbuffer == NULL) in vector_net_open()
1214 irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd, in vector_net_open()
1222 vp->rx_irq = irq_rr + VECTOR_BASE_IRQ; in vector_net_open()
1227 if ((vp->options & VECTOR_TX) > 0) { in vector_net_open()
1229 irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd, in vector_net_open()
1238 vp->tx_irq = irq_rr + VECTOR_BASE_IRQ; in vector_net_open()
1242 if ((vp->options & VECTOR_QDISC_BYPASS) != 0) { in vector_net_open()
1243 if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd)) in vector_net_open()
1244 vp->options |= VECTOR_BPF; in vector_net_open()
1246 if ((vp->options & VECTOR_BPF) != 0) in vector_net_open()
1247 vp->bpf = uml_vector_default_bpf(vp->fds->rx_fd, dev->dev_addr); in vector_net_open()
1256 vector_rx(vp); in vector_net_open()
1258 vector_reset_stats(vp); in vector_net_open()
1259 vdevice = find_device(vp->unit); in vector_net_open()
1262 if ((vp->options & VECTOR_TX) != 0) in vector_net_open()
1263 add_timer(&vp->tl); in vector_net_open()
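
Note: vector_net_open() builds the datapath in order: open the transport fds, create RX/TX queues per the VECTOR_RX/VECTOR_TX option bits, request the IRQs, and optionally enable qdisc bypass on the raw socket; when the bypass succeeds, the VECTOR_BPF option is turned on and a default BPF filter derived from the device MAC (dev->dev_addr) is installed. For AF_PACKET sockets, qdisc bypass is the PACKET_QDISC_BYPASS socket option; the sketch below shows that standard option in userspace, as an assumption about what uml_raw_enable_qdisc_bypass() wraps, not its exact body:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>        /* htons */
#include <linux/if_packet.h>   /* PACKET_QDISC_BYPASS */
#include <linux/if_ether.h>    /* ETH_P_ALL */

int main(void)
{
        /* Raw AF_PACKET socket; needs CAP_NET_RAW, so expect failure as
         * an ordinary user. */
        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        int one = 1;

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        /* Skip the kernel qdisc on transmit, as the VECTOR_QDISC_BYPASS
         * option requests. */
        if (setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS,
                       &one, sizeof(one)) < 0)
                perror("PACKET_QDISC_BYPASS");
        else
                puts("qdisc bypass enabled");
        return 0;
}
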
1279 struct vector_private *vp = netdev_priv(dev); in vector_net_tx_timeout() local
1281 vp->estats.tx_timeout_count++; in vector_net_tx_timeout()
1283 schedule_work(&vp->reset_tx); in vector_net_tx_timeout()
1296 struct vector_private *vp = netdev_priv(dev); in vector_set_features() local
1303 vp->req_size = 65536; in vector_set_features()
1306 vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN; in vector_set_features()
1329 struct vector_private *vp = netdev_priv(netdev); in vector_get_ringparam() local
1331 ring->rx_max_pending = vp->rx_queue->max_depth; in vector_get_ringparam()
1332 ring->tx_max_pending = vp->tx_queue->max_depth; in vector_get_ringparam()
1333 ring->rx_pending = vp->rx_queue->max_depth; in vector_get_ringparam()
1334 ring->tx_pending = vp->tx_queue->max_depth; in vector_get_ringparam()
1368 struct vector_private *vp = netdev_priv(dev); in vector_get_ethtool_stats() local
1370 memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats)); in vector_get_ethtool_stats()
1376 struct vector_private *vp = netdev_priv(netdev); in vector_get_coalesce() local
1378 ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ; in vector_get_coalesce()
1385 struct vector_private *vp = netdev_priv(netdev); in vector_set_coalesce() local
1387 vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000; in vector_set_coalesce()
1388 if (vp->coalesce == 0) in vector_set_coalesce()
1389 vp->coalesce = 1; in vector_set_coalesce()
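
Note: the ethtool coalescing hooks convert between microseconds and jiffies by hand: get reports coalesce * 1000000 / HZ, and set stores usecs * HZ / 1000000 clamped to at least one jiffy (the kernel's usecs_to_jiffies()/jiffies_to_usecs() helpers do the same conversion). A tiny sketch of the round-trip, with HZ fixed at 100 purely for illustration:

#include <stdio.h>

#define HZ 100   /* assumption for the example; the real value is
                  * kernel-configuration dependent */

int main(void)
{
        unsigned int usecs = 64;
        /* vector_set_coalesce(): usecs -> jiffies, minimum one jiffy */
        unsigned long coalesce = ((unsigned long)usecs * HZ) / 1000000;

        if (coalesce == 0)
                coalesce = 1;
        /* vector_get_coalesce(): jiffies -> usecs; note how the one-jiffy
         * floor rounds small requests up to a whole tick */
        printf("requested %u us -> %lu jiffies -> reported %lu us\n",
               usecs, coalesce, coalesce * 1000000 / HZ);
        return 0;
}
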
1424 struct vector_private *vp = from_timer(vp, t, tl); in vector_timer_expire() local
1426 vp->estats.tx_kicks++; in vector_timer_expire()
1427 vector_send(vp->tx_queue); in vector_timer_expire()
1437 struct vector_private *vp; in vector_eth_configure() local
1464 vp = netdev_priv(dev); in vector_eth_configure()
1481 *vp = ((struct vector_private) in vector_eth_configure()
1483 .list = LIST_HEAD_INIT(vp->list), in vector_eth_configure()
1510 tasklet_init(&vp->tx_poll, vector_tx_poll, (unsigned long)vp); in vector_eth_configure()
1511 INIT_WORK(&vp->reset_tx, vector_reset_tx); in vector_eth_configure()
1513 timer_setup(&vp->tl, vector_timer_expire, 0); in vector_eth_configure()
1514 spin_lock_init(&vp->lock); in vector_eth_configure()
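
Note: vector_eth_configure() initializes the private area with a C99 compound-literal assignment (the *vp = ((struct vector_private){ .list = LIST_HEAD_INIT(vp->list), ... }) at source lines 1481-1483 above), which zero-initializes every member not named, then wires up the deferred-work machinery: a tasklet for TX polling, a work item for queue resets, the coalescing timer, and the spinlock. A runnable sketch of the compound-literal idiom with a hypothetical cut-down struct:

#include <stdio.h>

/* Hypothetical cut-down private struct, for illustration only. */
struct demo_private {
        int unit;
        int coalesce;
        int options;
};

int main(void)
{
        struct demo_private priv_storage, *vp = &priv_storage;

        /* Compound-literal assignment: members not named (options here)
         * are zero-initialized, which is why this is a convenient way to
         * set up a netdev_priv() area in one statement. */
        *vp = ((struct demo_private) {
                .unit     = 3,
                .coalesce = 2,
        });
        printf("unit=%d coalesce=%d options=%d\n",
               vp->unit, vp->coalesce, vp->options);
        return 0;
}
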