| /Linux-v5.4/drivers/net/ethernet/sfc/falcon/ |
| D | rx.c |
     76  ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf)    in ef4_rx_buf_next() argument
     78      if (unlikely(rx_buf == ef4_rx_buffer(rx_queue, rx_queue->ptr_mask)))    in ef4_rx_buf_next()
     79          return ef4_rx_buffer(rx_queue, 0);    in ef4_rx_buf_next()
    106  static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)    in ef4_reuse_page() argument
    108      struct ef4_nic *efx = rx_queue->efx;    in ef4_reuse_page()
    113      index = rx_queue->page_remove & rx_queue->page_ptr_mask;    in ef4_reuse_page()
    114      page = rx_queue->page_ring[index];    in ef4_reuse_page()
    118      rx_queue->page_ring[index] = NULL;    in ef4_reuse_page()
    120      if (rx_queue->page_remove != rx_queue->page_add)    in ef4_reuse_page()
    121          ++rx_queue->page_remove;    in ef4_reuse_page()
    [all …]
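Several of the hits above rely on the same idiom: the RX buffer ring and the page-recycle ring are indexed by free-running counters ANDed with a power-of-two mask (ptr_mask, page_ptr_mask), so wrap-around is implicit and the fill level is simply "added minus removed". Below is a minimal, self-contained sketch of that masking pattern; the demo_ring type and all of its names are invented for illustration and are not part of the driver.

```c
#include <stdio.h>

/* Illustrative ring: entry count is a power of two, mask = entries - 1. */
#define RING_ENTRIES 8u
#define RING_MASK    (RING_ENTRIES - 1u)

struct demo_ring {
	int          slots[RING_ENTRIES];
	unsigned int added;    /* producer counter, free-running */
	unsigned int removed;  /* consumer counter, free-running */
};

/* Add one entry; the counter wraps implicitly via the mask. */
static int ring_add(struct demo_ring *r, int value)
{
	if (r->added - r->removed == RING_ENTRIES)
		return -1;                        /* full */
	r->slots[r->added & RING_MASK] = value;
	r->added++;
	return 0;
}

/* Remove one entry, mirroring the page_remove/page_add style check above. */
static int ring_remove(struct demo_ring *r, int *value)
{
	if (r->removed == r->added)
		return -1;                        /* empty */
	*value = r->slots[r->removed & RING_MASK];
	r->removed++;
	return 0;
}

int main(void)
{
	struct demo_ring r = { .added = 0, .removed = 0 };
	int v;

	for (int i = 0; i < 10; i++)
		ring_add(&r, i);                  /* last two adds fail: ring full */
	while (ring_remove(&r, &v) == 0)
		printf("%d\n", v);                /* prints 0..7 */
	return 0;
}
```

Because the counters are only masked at the point of use, they never need an explicit reset, which is why the driver can compare page_remove and page_add directly.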
|
| D | nic.h |
    110  ef4_rx_desc(struct ef4_rx_queue *rx_queue, unsigned int index)    in ef4_rx_desc() argument
    112      return ((ef4_qword_t *) (rx_queue->rxd.buf.addr)) + index;    in ef4_rx_desc()
    333  static inline int ef4_nic_probe_rx(struct ef4_rx_queue *rx_queue)    in ef4_nic_probe_rx() argument
    335      return rx_queue->efx->type->rx_probe(rx_queue);    in ef4_nic_probe_rx()
    337  static inline void ef4_nic_init_rx(struct ef4_rx_queue *rx_queue)    in ef4_nic_init_rx() argument
    339      rx_queue->efx->type->rx_init(rx_queue);    in ef4_nic_init_rx()
    341  static inline void ef4_nic_remove_rx(struct ef4_rx_queue *rx_queue)    in ef4_nic_remove_rx() argument
    343      rx_queue->efx->type->rx_remove(rx_queue);    in ef4_nic_remove_rx()
    345  static inline void ef4_nic_notify_rx_desc(struct ef4_rx_queue *rx_queue)    in ef4_nic_notify_rx_desc() argument
    347      rx_queue->efx->type->rx_write(rx_queue);    in ef4_nic_notify_rx_desc()
    [all …]
|
| D | farch.c |
    471  ef4_farch_build_rx_desc(struct ef4_rx_queue *rx_queue, unsigned index)    in ef4_farch_build_rx_desc() argument
    476      rxd = ef4_rx_desc(rx_queue, index);    in ef4_farch_build_rx_desc()
    477      rx_buf = ef4_rx_buffer(rx_queue, index);    in ef4_farch_build_rx_desc()
    481          rx_queue->efx->type->rx_buffer_padding,    in ef4_farch_build_rx_desc()
    489  void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue)    in ef4_farch_rx_write() argument
    491      struct ef4_nic *efx = rx_queue->efx;    in ef4_farch_rx_write()
    495      while (rx_queue->notified_count != rx_queue->added_count) {    in ef4_farch_rx_write()
    497              rx_queue,    in ef4_farch_rx_write()
    498              rx_queue->notified_count & rx_queue->ptr_mask);    in ef4_farch_rx_write()
    499          ++rx_queue->notified_count;    in ef4_farch_rx_write()
    [all …]
|
| D | net_driver.h |
    444  struct ef4_rx_queue rx_queue;    member
   1090  int (*rx_probe)(struct ef4_rx_queue *rx_queue);
   1091  void (*rx_init)(struct ef4_rx_queue *rx_queue);
   1092  void (*rx_remove)(struct ef4_rx_queue *rx_queue);
   1093  void (*rx_write)(struct ef4_rx_queue *rx_queue);
   1094  void (*rx_defer_refill)(struct ef4_rx_queue *rx_queue);
   1236      return channel->rx_queue.core_index >= 0;    in ef4_channel_has_rx_queue()
   1243      return &channel->rx_queue;    in ef4_channel_get_rx_queue()
   1251      for (_rx_queue = &(_channel)->rx_queue; \
   1256  ef4_rx_queue_channel(struct ef4_rx_queue *rx_queue)    in ef4_rx_queue_channel() argument
    [all …]
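The rx_probe/rx_init/rx_remove/rx_write/rx_defer_refill entries above are function pointers in the per-generation NIC type structure, and the ef4_nic_*_rx() inlines in nic.h simply dispatch through rx_queue->efx->type. A small stand-alone sketch of that ops-table dispatch pattern follows; every demo_* and gen1_* name is invented for the example and is not part of the driver.

```c
#include <stdio.h>

struct demo_rx_queue;   /* forward declaration of the queue the ops act on */

/* Per-hardware-generation ops table, analogous to the rx_* hooks above. */
struct demo_nic_type {
	int  (*rx_probe)(struct demo_rx_queue *rxq);
	void (*rx_init)(struct demo_rx_queue *rxq);
	void (*rx_write)(struct demo_rx_queue *rxq);
};

struct demo_nic {
	const struct demo_nic_type *type;
};

struct demo_rx_queue {
	struct demo_nic *nic;
	unsigned int added_count;
	unsigned int notified_count;
};

/* Thin wrappers that dispatch through the ops table, like the nic.h inlines. */
static int demo_nic_probe_rx(struct demo_rx_queue *rxq)
{
	return rxq->nic->type->rx_probe(rxq);
}

static void demo_nic_notify_rx_desc(struct demo_rx_queue *rxq)
{
	rxq->nic->type->rx_write(rxq);
}

/* One concrete implementation of the ops (hypothetical "gen1" hardware). */
static int gen1_rx_probe(struct demo_rx_queue *rxq) { (void)rxq; return 0; }
static void gen1_rx_init(struct demo_rx_queue *rxq) { rxq->added_count = 0; }
static void gen1_rx_write(struct demo_rx_queue *rxq)
{
	rxq->notified_count = rxq->added_count;   /* pretend to push to hardware */
}

static const struct demo_nic_type gen1_type = {
	.rx_probe = gen1_rx_probe,
	.rx_init  = gen1_rx_init,
	.rx_write = gen1_rx_write,
};

int main(void)
{
	struct demo_nic nic = { .type = &gen1_type };
	struct demo_rx_queue rxq = { .nic = &nic };

	if (demo_nic_probe_rx(&rxq) == 0) {
		nic.type->rx_init(&rxq);
		rxq.added_count = 4;
		demo_nic_notify_rx_desc(&rxq);
		printf("notified %u descriptors\n", rxq.notified_count);
	}
	return 0;
}
```

The same queue code can then drive any hardware generation that fills in the ops table, which is how the falcon and sfc drivers share their common RX path.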
|
| D | efx.h |
     40  int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue);
     41  void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue);
     42  void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue);
     43  void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue);
     44  void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic);
     47  void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
     54  void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue);
|
| D | efx.c |
    253      struct ef4_rx_queue *rx_queue =    in ef4_process_channel() local
    257      ef4_fast_push_rx_descriptors(rx_queue, true);    in ef4_process_channel()
    428      struct ef4_rx_queue *rx_queue;    in ef4_alloc_channel() local
    447      rx_queue = &channel->rx_queue;    in ef4_alloc_channel()
    448      rx_queue->efx = efx;    in ef4_alloc_channel()
    449      timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0);    in ef4_alloc_channel()
    461      struct ef4_rx_queue *rx_queue;    in ef4_copy_channel() local
    485      rx_queue = &channel->rx_queue;    in ef4_copy_channel()
    486      rx_queue->buffer = NULL;    in ef4_copy_channel()
    487      memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));    in ef4_copy_channel()
    [all …]
|
| /Linux-v5.4/drivers/net/ethernet/sfc/ |
| D | rx.c |
     76  efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)    in efx_rx_buf_next() argument
     78      if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))    in efx_rx_buf_next()
     79          return efx_rx_buffer(rx_queue, 0);    in efx_rx_buf_next()
    106  static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)    in efx_reuse_page() argument
    108      struct efx_nic *efx = rx_queue->efx;    in efx_reuse_page()
    113      index = rx_queue->page_remove & rx_queue->page_ptr_mask;    in efx_reuse_page()
    114      page = rx_queue->page_ring[index];    in efx_reuse_page()
    118      rx_queue->page_ring[index] = NULL;    in efx_reuse_page()
    120      if (rx_queue->page_remove != rx_queue->page_add)    in efx_reuse_page()
    121          ++rx_queue->page_remove;    in efx_reuse_page()
    [all …]
|
| D | farch.c |
    459  efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)    in efx_farch_build_rx_desc() argument
    464      rxd = efx_rx_desc(rx_queue, index);    in efx_farch_build_rx_desc()
    465      rx_buf = efx_rx_buffer(rx_queue, index);    in efx_farch_build_rx_desc()
    469          rx_queue->efx->type->rx_buffer_padding,    in efx_farch_build_rx_desc()
    477  void efx_farch_rx_write(struct efx_rx_queue *rx_queue)    in efx_farch_rx_write() argument
    479      struct efx_nic *efx = rx_queue->efx;    in efx_farch_rx_write()
    483      while (rx_queue->notified_count != rx_queue->added_count) {    in efx_farch_rx_write()
    485              rx_queue,    in efx_farch_rx_write()
    486              rx_queue->notified_count & rx_queue->ptr_mask);    in efx_farch_rx_write()
    487          ++rx_queue->notified_count;    in efx_farch_rx_write()
    [all …]
|
| D | nic.h |
    133  efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)    in efx_rx_desc() argument
    135      return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;    in efx_rx_desc()
    510  static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)    in efx_nic_probe_rx() argument
    512      return rx_queue->efx->type->rx_probe(rx_queue);    in efx_nic_probe_rx()
    514  static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)    in efx_nic_init_rx() argument
    516      rx_queue->efx->type->rx_init(rx_queue);    in efx_nic_init_rx()
    518  static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)    in efx_nic_remove_rx() argument
    520      rx_queue->efx->type->rx_remove(rx_queue);    in efx_nic_remove_rx()
    522  static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)    in efx_nic_notify_rx_desc() argument
    524      rx_queue->efx->type->rx_write(rx_queue);    in efx_nic_notify_rx_desc()
    [all …]
|
| D | efx.h |
     37  int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
     38  void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
     39  void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
     40  void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
     41  void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
     44  void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
     51  void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
|
| D | net_driver.h |
    503  struct efx_rx_queue rx_queue;    member
   1337  int (*rx_probe)(struct efx_rx_queue *rx_queue);
   1338  void (*rx_init)(struct efx_rx_queue *rx_queue);
   1339  void (*rx_remove)(struct efx_rx_queue *rx_queue);
   1340  void (*rx_write)(struct efx_rx_queue *rx_queue);
   1341  void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
   1517      return channel->rx_queue.core_index >= 0;    in efx_channel_has_rx_queue()
   1524      return &channel->rx_queue;    in efx_channel_get_rx_queue()
   1532      for (_rx_queue = &(_channel)->rx_queue; \
   1537  efx_rx_queue_channel(struct efx_rx_queue *rx_queue)    in efx_rx_queue_channel() argument
    [all …]
|
| D | ef10.c |
   3072  static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)    in efx_ef10_rx_probe() argument
   3074      return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,    in efx_ef10_rx_probe()
   3075              (rx_queue->ptr_mask + 1) *    in efx_ef10_rx_probe()
   3080  static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)    in efx_ef10_rx_init() argument
   3085      struct efx_channel *channel = efx_rx_queue_channel(rx_queue);    in efx_ef10_rx_init()
   3086      size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;    in efx_ef10_rx_init()
   3087      struct efx_nic *efx = rx_queue->efx;    in efx_ef10_rx_init()
   3095      rx_queue->scatter_n = 0;    in efx_ef10_rx_init()
   3096      rx_queue->scatter_len = 0;    in efx_ef10_rx_init()
   3098      MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);    in efx_ef10_rx_init()
    [all …]
|
| D | efx.c |
    282      struct efx_rx_queue *rx_queue =    in efx_process_channel() local
    286      efx_fast_push_rx_descriptors(rx_queue, true);    in efx_process_channel()
    464      struct efx_rx_queue *rx_queue;    in efx_alloc_channel() local
    487      rx_queue = &channel->rx_queue;    in efx_alloc_channel()
    488      rx_queue->efx = efx;    in efx_alloc_channel()
    489      timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);    in efx_alloc_channel()
    501      struct efx_rx_queue *rx_queue;    in efx_copy_channel() local
    525      rx_queue = &channel->rx_queue;    in efx_copy_channel()
    526      rx_queue->buffer = NULL;    in efx_copy_channel()
    527      memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));    in efx_copy_channel()
    [all …]
|
| /Linux-v5.4/drivers/net/ethernet/freescale/ |
| D | gianfar.c |
    112  static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,    in gfar_init_rxbdp() argument
    120      if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)    in gfar_init_rxbdp()
    142      gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);    in gfar_init_tx_rx_base()
    155      gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |    in gfar_init_rqprm()
    261      if (likely(priv->rx_queue[i]->rxcoalescing))    in gfar_configure_coalescing()
    262          gfar_write(baddr + i, priv->rx_queue[i]->rxic);    in gfar_configure_coalescing()
    273      if (unlikely(priv->rx_queue[0]->rxcoalescing))    in gfar_configure_coalescing()
    274          gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);    in gfar_configure_coalescing()
    291      rx_packets += priv->rx_queue[i]->stats.rx_packets;    in gfar_get_stats()
    292      rx_bytes += priv->rx_queue[i]->stats.rx_bytes;    in gfar_get_stats()
    [all …]
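The gfar_get_stats() hits above show the usual multi-queue pattern of summing each RX queue's private counters into the device-level statistics. A tiny self-contained version of that aggregation loop is sketched below; the demo_* names mirror the pattern only and are not the gianfar types.

```c
#include <stdio.h>

/* Illustrative per-queue counters. */
struct demo_rx_q {
	unsigned long rx_packets;
	unsigned long rx_bytes;
};

#define NUM_RX_QS 4

/* Device-level stats are the sum of every RX queue's private counters. */
static void demo_get_stats(const struct demo_rx_q q[NUM_RX_QS],
			   unsigned long *rx_packets, unsigned long *rx_bytes)
{
	*rx_packets = 0;
	*rx_bytes = 0;
	for (int i = 0; i < NUM_RX_QS; i++) {
		*rx_packets += q[i].rx_packets;
		*rx_bytes   += q[i].rx_bytes;
	}
}

int main(void)
{
	const struct demo_rx_q q[NUM_RX_QS] = {
		{ 10, 1500 }, { 20, 3000 }, { 5, 400 }, { 0, 0 },
	};
	unsigned long pkts, bytes;

	demo_get_stats(q, &pkts, &bytes);
	printf("rx_packets=%lu rx_bytes=%lu\n", pkts, bytes);  /* 35, 4900 */
	return 0;
}
```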
|
| D | gianfar_ethtool.c |
    253      struct gfar_priv_rx_q *rx_queue = NULL;    in gfar_gcoalesce() local
    266      rx_queue = priv->rx_queue[0];    in gfar_gcoalesce()
    269      rxtime = get_ictt_value(rx_queue->rxic);    in gfar_gcoalesce()
    270      rxcount = get_icft_value(rx_queue->rxic);    in gfar_gcoalesce()
    360      priv->rx_queue[i]->rxcoalescing = 0;    in gfar_scoalesce()
    363      priv->rx_queue[i]->rxcoalescing = 1;    in gfar_scoalesce()
    367      priv->rx_queue[i]->rxic = mk_ic_value(    in gfar_scoalesce()
    408      struct gfar_priv_rx_q *rx_queue = NULL;    in gfar_gringparam() local
    411      rx_queue = priv->rx_queue[0];    in gfar_gringparam()
    421      rvals->rx_pending = rx_queue->rx_ring_size;    in gfar_gringparam()
    [all …]
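The coalescing hits above pack a frame-count threshold and a timer value into one interrupt-coalescing word with mk_ic_value() and later unpack them with get_icft_value()/get_ictt_value(). The sketch below shows that pack/unpack idea in isolation; the field layout (enable in bit 31, count above the timer) is purely illustrative and is not the gianfar register format.

```c
#include <stdio.h>
#include <stdint.h>

/* Hypothetical layout: bit 31 = enable, bits 21..28 = frame count,
 * bits 0..15 = timer ticks. Invented for the example. */
#define DEMO_IC_EN       0x80000000u
#define DEMO_ICFT_SHIFT  21
#define DEMO_ICFT_MASK   0xffu
#define DEMO_ICTT_MASK   0xffffu

static uint32_t demo_mk_ic_value(uint32_t count, uint32_t time)
{
	return DEMO_IC_EN |
	       ((count & DEMO_ICFT_MASK) << DEMO_ICFT_SHIFT) |
	       (time & DEMO_ICTT_MASK);
}

static uint32_t demo_get_icft_value(uint32_t ic)
{
	return (ic >> DEMO_ICFT_SHIFT) & DEMO_ICFT_MASK;
}

static uint32_t demo_get_ictt_value(uint32_t ic)
{
	return ic & DEMO_ICTT_MASK;
}

int main(void)
{
	uint32_t ic = demo_mk_ic_value(16, 1000);

	printf("count=%u time=%u\n",
	       (unsigned int)demo_get_icft_value(ic),
	       (unsigned int)demo_get_ictt_value(ic));   /* count=16 time=1000 */
	return 0;
}
```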
|
| /Linux-v5.4/drivers/net/ethernet/ibm/ |
| D | ibmveth.c |
    105      return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);    in ibmveth_rxq_flags()
    116      return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;    in ibmveth_rxq_pending_buffer()
    136      return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);    in ibmveth_rxq_frame_length()
    397      u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;    in ibmveth_rxq_get_buffer()
    410      u32 q_index = adapter->rx_queue.index;    in ibmveth_rxq_recycle_buffer()
    411      u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;    in ibmveth_rxq_recycle_buffer()
    436      …ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].cor…    in ibmveth_rxq_recycle_buffer()
    440      if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {    in ibmveth_rxq_recycle_buffer()
    441          adapter->rx_queue.index = 0;    in ibmveth_rxq_recycle_buffer()
    442          adapter->rx_queue.toggle = !adapter->rx_queue.toggle;    in ibmveth_rxq_recycle_buffer()
    [all …]
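The last three hits show how ibmveth advances through its receive queue: the index wraps at num_slots and a toggle bit flips on each lap, and ibmveth_rxq_pending_buffer() treats a slot as pending when the slot's toggle matches the adapter's current one. Below is a self-contained sketch of that toggle-bit ring with both sides of the hand-off faked in one process; every demo_* name is invented and the demo producer does no overrun checking.

```c
#include <stdbool.h>
#include <stdio.h>

#define NUM_SLOTS 4

struct demo_slot {
	bool toggle;    /* written by the producer when the slot is filled */
	int  data;
};

struct demo_rxq {
	struct demo_slot slots[NUM_SLOTS];
	unsigned int index;      /* consumer position */
	bool toggle;             /* expected toggle value for the current lap */
	unsigned int prod;       /* producer position (demo only) */
	bool prod_toggle;
};

static void demo_produce(struct demo_rxq *q, int data)
{
	q->slots[q->prod].data = data;
	q->slots[q->prod].toggle = q->prod_toggle;
	if (++q->prod == NUM_SLOTS) {
		q->prod = 0;
		q->prod_toggle = !q->prod_toggle;  /* new lap: flip producer toggle */
	}
}

/* A buffer is pending when the slot's toggle matches the consumer's toggle. */
static bool demo_pending(const struct demo_rxq *q)
{
	return q->slots[q->index].toggle == q->toggle;
}

static int demo_consume(struct demo_rxq *q)
{
	int data = q->slots[q->index].data;

	if (++q->index == NUM_SLOTS) {             /* wrap and flip, as above */
		q->index = 0;
		q->toggle = !q->toggle;
	}
	return data;
}

int main(void)
{
	struct demo_rxq q = { .toggle = true, .prod_toggle = true };

	for (int i = 0; i < 6; i++) {
		demo_produce(&q, i * 100);         /* producer fills a slot      */
		while (demo_pending(&q))           /* consumer drains valid ones */
			printf("got %d\n", demo_consume(&q));
	}
	return 0;
}
```

The toggle comparison lets the consumer tell a freshly filled slot from a stale one without any shared head/tail counters.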
|
| /Linux-v5.4/drivers/net/xen-netback/ |
| D | rx.c |
     42      skb = skb_peek(&queue->rx_queue);    in xenvif_rx_ring_slots_available()
     74      spin_lock_irqsave(&queue->rx_queue.lock, flags);    in xenvif_rx_queue_tail()
     76      __skb_queue_tail(&queue->rx_queue, skb);    in xenvif_rx_queue_tail()
     85      spin_unlock_irqrestore(&queue->rx_queue.lock, flags);    in xenvif_rx_queue_tail()
     92      spin_lock_irq(&queue->rx_queue.lock);    in xenvif_rx_dequeue()
     94      skb = __skb_dequeue(&queue->rx_queue);    in xenvif_rx_dequeue()
    105      spin_unlock_irq(&queue->rx_queue.lock);    in xenvif_rx_dequeue()
    123      skb = skb_peek(&queue->rx_queue);    in xenvif_rx_queue_drop_expired()
    508      skb = skb_peek(&queue->rx_queue);    in xenvif_rx_queue_timeout()
    603      if (!skb_queue_empty(&queue->rx_queue))    in xenvif_kthread_guest_rx()
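xenvif_rx_queue_tail() and xenvif_rx_dequeue() above are a straightforward producer/consumer hand-off: one side appends to the skb queue under the queue's own lock, the kthread side detaches the head under the same lock. The stand-alone sketch below shows the same discipline with a pthread mutex in place of the spinlock; the demo_* names are invented.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal locked FIFO, standing in for the skb queue guarded by
 * rx_queue.lock above. */
struct demo_pkt {
	struct demo_pkt *next;
	int id;
};

struct demo_queue {
	pthread_mutex_t lock;
	struct demo_pkt *head, *tail;
};

/* Producer side: append under the lock (cf. xenvif_rx_queue_tail()). */
static void demo_queue_tail(struct demo_queue *q, struct demo_pkt *pkt)
{
	pthread_mutex_lock(&q->lock);
	pkt->next = NULL;
	if (q->tail)
		q->tail->next = pkt;
	else
		q->head = pkt;
	q->tail = pkt;
	pthread_mutex_unlock(&q->lock);
}

/* Consumer side: detach the head under the lock (cf. xenvif_rx_dequeue()). */
static struct demo_pkt *demo_dequeue(struct demo_queue *q)
{
	struct demo_pkt *pkt;

	pthread_mutex_lock(&q->lock);
	pkt = q->head;
	if (pkt) {
		q->head = pkt->next;
		if (!q->head)
			q->tail = NULL;
	}
	pthread_mutex_unlock(&q->lock);
	return pkt;
}

int main(void)
{
	struct demo_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct demo_pkt *pkt;

	for (int i = 0; i < 3; i++) {
		pkt = malloc(sizeof(*pkt));
		pkt->id = i;
		demo_queue_tail(&q, pkt);
	}
	while ((pkt = demo_dequeue(&q))) {
		printf("dequeued %d\n", pkt->id);
		free(pkt);
	}
	return 0;
}
```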
|
| /Linux-v5.4/drivers/net/vmxnet3/ |
| D | vmxnet3_drv.c |
   1625      vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);    in vmxnet3_rq_cleanup_all()
   1684      struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];    in vmxnet3_rq_destroy_all_rxdataring()
   1756      err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);    in vmxnet3_rq_init_all()
   1841      err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);    in vmxnet3_rq_create_all()
   1872      rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],    in vmxnet3_do_poll()
   1881      struct vmxnet3_rx_queue *rx_queue = container_of(napi,    in vmxnet3_poll() local
   1885      rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);    in vmxnet3_poll()
   1889      vmxnet3_enable_all_intrs(rx_queue->adapter);    in vmxnet3_poll()
   1912      &adapter->tx_queue[rq - adapter->rx_queue];    in vmxnet3_poll_rx_only()
   2030      napi_schedule(&adapter->rx_queue[0].napi);    in vmxnet3_intr()
    [all …]
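vmxnet3_poll() above receives only the embedded napi structure and uses container_of() to get back to the RX queue that contains it (line 1912 plays a related trick with pointer arithmetic between the queue arrays). Here is a minimal user-space illustration of the container_of() idiom; the demo types are invented.

```c
#include <stddef.h>
#include <stdio.h>

/* container_of(): recover the enclosing structure from a pointer to one of
 * its members, as the poll callback does with the embedded napi struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_napi {
	int weight;
};

struct demo_rx_queue {
	int id;
	struct demo_napi napi;   /* embedded member handed to the callback */
};

/* The callback only sees the napi pointer; walk back to the owning queue. */
static int demo_poll(struct demo_napi *napi, int budget)
{
	struct demo_rx_queue *rxq = container_of(napi, struct demo_rx_queue, napi);

	printf("polling rx queue %d, budget %d\n", rxq->id, budget);
	return 0;
}

int main(void)
{
	struct demo_rx_queue rxq = { .id = 2, .napi = { .weight = 64 } };

	demo_poll(&rxq.napi, 64);
	return 0;
}
```

Embedding the napi context inside the per-queue structure is what lets each RX queue get its own poll instance without any global lookup table.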
|
| /Linux-v5.4/drivers/net/wireless/realtek/rtlwifi/ |
| D | usb.c |
    312      skb_queue_head_init(&rtlusb->rx_queue);    in _rtl_usb_init_rx()
    515      struct sk_buff_head rx_queue;    in _rtl_rx_pre_process() local
    518      skb_queue_head_init(&rx_queue);    in _rtl_rx_pre_process()
    520      rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);    in _rtl_rx_pre_process()
    521      WARN_ON(skb_queue_empty(&rx_queue));    in _rtl_rx_pre_process()
    522      while (!skb_queue_empty(&rx_queue)) {    in _rtl_rx_pre_process()
    523          _skb = skb_dequeue(&rx_queue);    in _rtl_rx_pre_process()
    537      while ((skb = skb_dequeue(&rtlusb->rx_queue))) {    in _rtl_rx_work()
    611      qlen = skb_queue_len(&rtlusb->rx_queue);    in _rtl_rx_completed()
    637      skb_queue_tail(&rtlusb->rx_queue, skb);    in _rtl_rx_completed()
    [all …]
|
| /Linux-v5.4/drivers/net/ethernet/qlogic/qed/ |
| D | qed_ll2.c |
     66  #define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registered)
    446      struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;    in qed_ll2_handle_slowpath()
    481      struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;    in qed_ll2_rxq_handle_completion()
    523      struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;    in qed_ll2_rxq_completion()
    577      p_rx = &p_ll2_conn->rx_queue;    in qed_ll2_rxq_flush()
    634      struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;    in qed_ll2_lb_rxq_handler()
    945      struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;    in qed_sp_ll2_rx_queue_start()
   1165          &p_ll2_info->rx_queue.rxq_chain, NULL);    in qed_ll2_acquire_connection_rx()
   1171      capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);    in qed_ll2_acquire_connection_rx()
   1179      p_ll2_info->rx_queue.descq_array = p_descq;    in qed_ll2_acquire_connection_rx()
    [all …]
|
| /Linux-v5.4/drivers/net/phy/ |
| D | dp83640.c |
    117  struct sk_buff_head rx_queue;    member
    875      spin_lock(&dp83640->rx_queue.lock);    in decode_rxts()
    876      skb_queue_walk(&dp83640->rx_queue, skb) {    in decode_rxts()
    881          __skb_unlink(skb, &dp83640->rx_queue);    in decode_rxts()
    889      spin_unlock(&dp83640->rx_queue.lock);    in decode_rxts()
   1162      skb_queue_head_init(&dp83640->rx_queue);    in dp83640_probe()
   1202      skb_queue_purge(&dp83640->rx_queue);    in dp83640_remove()
   1415      while ((skb = skb_dequeue(&dp83640->rx_queue))) {    in rx_timestamp_work()
   1420          skb_queue_head(&dp83640->rx_queue, skb);    in rx_timestamp_work()
   1427      if (!skb_queue_empty(&dp83640->rx_queue))    in rx_timestamp_work()
    [all …]
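decode_rxts() above walks the deferred-packet queue under its lock and unlinks the packet that matches the hardware timestamp it just decoded (the matching criterion itself is not visible in the excerpt). The sketch below reproduces that walk-and-unlink step with an ordinary linked list and a mutex; the hash field and all demo_* names are invented stand-ins for the real matching logic.

```c
#include <pthread.h>
#include <stdio.h>

struct demo_skb {
	struct demo_skb *next;
	unsigned int hash;       /* stand-in for the PTP sequence/hash match */
};

struct demo_skb_queue {
	pthread_mutex_t lock;
	struct demo_skb *head;
};

/* Walk the queue under its lock; unlink and return the first match. */
static struct demo_skb *demo_find_and_unlink(struct demo_skb_queue *q,
					     unsigned int hash)
{
	struct demo_skb **pp, *skb = NULL;

	pthread_mutex_lock(&q->lock);
	for (pp = &q->head; *pp; pp = &(*pp)->next) {
		if ((*pp)->hash == hash) {
			skb = *pp;
			*pp = skb->next;    /* unlink while holding the lock */
			skb->next = NULL;
			break;
		}
	}
	pthread_mutex_unlock(&q->lock);
	return skb;
}

int main(void)
{
	struct demo_skb a = { .hash = 1 }, b = { .hash = 2 }, c = { .hash = 3 };
	struct demo_skb_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = &a };
	struct demo_skb *hit;

	a.next = &b;
	b.next = &c;
	hit = demo_find_and_unlink(&q, 2);
	printf("matched skb with hash %u\n", hit ? hit->hash : 0);
	return 0;
}
```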
|
| /Linux-v5.4/drivers/net/ethernet/ti/ |
| D | netcp_core.c |
    624      dma = knav_queue_pop(netcp->rx_queue, &dma_sz);    in netcp_empty_rx_queue()
    653      dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);    in netcp_process_one_rx_packet()
    959      knav_queue_enable_notify(netcp->rx_queue);    in netcp_rx_poll()
    969      knav_queue_disable_notify(netcp->rx_queue);    in netcp_rx_notify()
   1561      if (!IS_ERR_OR_NULL(netcp->rx_queue)) {    in netcp_free_navigator_resources()
   1562          knav_queue_close(netcp->rx_queue);    in netcp_free_navigator_resources()
   1563          netcp->rx_queue = NULL;    in netcp_free_navigator_resources()
   1634      netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);    in netcp_setup_navigator_resources()
   1635      if (IS_ERR(netcp->rx_queue)) {    in netcp_setup_navigator_resources()
   1636          ret = PTR_ERR(netcp->rx_queue);    in netcp_setup_navigator_resources()
    [all …]
|
| /Linux-v5.4/drivers/net/wireless/intersil/p54/ |
| D | p54usb.c |
    146      skb_unlink(skb, &priv->rx_queue);    in p54u_rx_cb()
    185      skb_queue_tail(&priv->rx_queue, skb);    in p54u_rx_cb()
    188      skb_unlink(skb, &priv->rx_queue);    in p54u_rx_cb()
    229      while (skb_queue_len(&priv->rx_queue) < 32) {    in p54u_init_urbs()
    248          skb_queue_tail(&priv->rx_queue, skb);    in p54u_init_urbs()
    253          skb_unlink(skb, &priv->rx_queue);    in p54u_init_urbs()
   1006      skb_queue_head_init(&priv->rx_queue);    in p54u_probe()
|
| /Linux-v5.4/net/vmw_vsock/ |
| D | virtio_transport_common.c |
    282      while (total < len && !list_empty(&vvs->rx_queue)) {    in virtio_transport_stream_do_dequeue()
    283          pkt = list_first_entry(&vvs->rx_queue,    in virtio_transport_stream_do_dequeue()
    423      INIT_LIST_HEAD(&vvs->rx_queue);    in virtio_transport_do_socket_init()
    834      list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {    in virtio_transport_release()
    903      if (pkt->len <= GOOD_COPY_LEN && !list_empty(&vvs->rx_queue)) {    in virtio_transport_recv_enqueue()
    906          last_pkt = list_last_entry(&vvs->rx_queue,    in virtio_transport_recv_enqueue()
    921          list_add_tail(&pkt->list, &vvs->rx_queue);    in virtio_transport_recv_enqueue()
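virtio_transport_stream_do_dequeue() above drains the socket's rx_queue as a byte stream: it copies out of the packet at the head until the caller's buffer is satisfied and, in the full function, releases packets once they are fully consumed while leaving a partially read one in place. Below is a self-contained approximation of that loop; struct demo_pkt and its fields are illustrative, not the vsock types.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_pkt {
	struct demo_pkt *next;
	size_t len;
	size_t off;              /* how much of this packet was already read */
	unsigned char data[64];
};

struct demo_sock {
	struct demo_pkt *rx_queue;
};

/* Copy up to len bytes out of queued packets, freeing fully consumed ones. */
static size_t demo_stream_dequeue(struct demo_sock *s, unsigned char *buf,
				  size_t len)
{
	size_t total = 0;

	while (total < len && s->rx_queue) {
		struct demo_pkt *pkt = s->rx_queue;
		size_t n = pkt->len - pkt->off;

		if (n > len - total)
			n = len - total;
		memcpy(buf + total, pkt->data + pkt->off, n);
		total += n;
		pkt->off += n;

		if (pkt->off == pkt->len) {       /* packet fully consumed */
			s->rx_queue = pkt->next;
			free(pkt);
		}
	}
	return total;
}

int main(void)
{
	struct demo_sock s = { .rx_queue = NULL };
	struct demo_pkt **tail = &s.rx_queue;
	unsigned char out[8];

	for (int i = 0; i < 2; i++) {
		struct demo_pkt *pkt = calloc(1, sizeof(*pkt));

		memcpy(pkt->data, "abcde", 5);
		pkt->len = 5;
		*tail = pkt;
		tail = &pkt->next;
	}
	printf("read %zu bytes\n", demo_stream_dequeue(&s, out, sizeof(out)));
	return 0;
}
```

Keeping a per-packet read offset is what lets a stream socket return partial packets without reassembling or copying the queue.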
|
| /Linux-v5.4/drivers/net/dsa/mv88e6xxx/ |
| D | hwtstamp.c |
    348      skb = skb_dequeue(&ps->rx_queue);    in mv88e6xxx_rxtstamp_work()
    352              &ps->rx_queue);    in mv88e6xxx_rxtstamp_work()
    387      skb_queue_tail(&ps->rx_queue, skb);    in mv88e6xxx_port_rxtstamp()
    576      skb_queue_head_init(&ps->rx_queue);    in mv88e6xxx_hwtstamp_port_setup()
|