
Searched refs:droq (Results 1 – 14 of 14) sorted by relevance

/Linux-v5.4/drivers/net/ethernet/cavium/liquidio/
octeon_droq.c
94 u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq) in octeon_droq_check_hw_for_pkts() argument
99 pkt_count = readl(droq->pkts_sent_reg); in octeon_droq_check_hw_for_pkts()
101 last_count = pkt_count - droq->pkt_count; in octeon_droq_check_hw_for_pkts()
102 droq->pkt_count = pkt_count; in octeon_droq_check_hw_for_pkts()
106 atomic_add(last_count, &droq->pkts_pending); in octeon_droq_check_hw_for_pkts()
111 static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq) in octeon_droq_compute_max_packet_bufs() argument
120 droq->max_empty_descs = 0; in octeon_droq_compute_max_packet_bufs()
123 droq->max_empty_descs++; in octeon_droq_compute_max_packet_bufs()
124 count += droq->buffer_size; in octeon_droq_compute_max_packet_bufs()
127 droq->max_empty_descs = droq->max_count - droq->max_empty_descs; in octeon_droq_compute_max_packet_bufs()
[all …]
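
The interesting detail in octeon_droq_check_hw_for_pkts() above is that the hardware packet counter is a free-running 32-bit value, so the driver's "new minus old" subtraction stays correct across wraparound. A minimal user-space sketch of that arithmetic (fake_droq, the hw_value parameter and the helper names are illustrative stand-ins, not driver API):

/*
 * Sketch of the counter arithmetic in octeon_droq_check_hw_for_pkts():
 * the register is a free-running u32, so "new - old" stays correct
 * across wraparound thanks to unsigned modular arithmetic.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_droq {
    uint32_t pkt_count;     /* last value read from the "hardware" counter */
    uint32_t pkts_pending;  /* packets seen but not yet processed */
};

static uint32_t check_hw_for_pkts(struct fake_droq *droq, uint32_t hw_value)
{
    uint32_t pkt_count = hw_value;                      /* stands in for readl() */
    uint32_t last_count = pkt_count - droq->pkt_count;  /* wraps safely */

    droq->pkt_count = pkt_count;
    if (last_count)
        droq->pkts_pending += last_count;   /* atomic_add() in the driver */
    return last_count;
}

int main(void)
{
    struct fake_droq droq = { .pkt_count = 0xfffffffeu, .pkts_pending = 0 };

    /* counter wrapped from 0xfffffffe to 3: the delta is still 5 */
    printf("new packets: %u\n", (unsigned)check_hw_for_pkts(&droq, 3));
    printf("pending:     %u\n", (unsigned)droq.pkts_pending);
    return 0;
}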

lio_core.c
429 struct octeon_droq *droq) in octeon_schedule_rxq_oom_work() argument
433 struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no]; in octeon_schedule_rxq_oom_work()
445 struct octeon_droq *droq = oct->droq[q_no]; in octnet_poll_check_rxq_oom_status() local
447 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq) in octnet_poll_check_rxq_oom_status()
450 if (octeon_retry_droq_refill(droq)) in octnet_poll_check_rxq_oom_status()
451 octeon_schedule_rxq_oom_work(oct, droq); in octnet_poll_check_rxq_oom_status()
553 writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg); in octeon_setup_droq()
575 struct octeon_droq *droq = in liquidio_push_packet() local
591 droq->stats.rx_dropped++; in liquidio_push_packet()
597 skb_record_rx_queue(skb, droq->q_no); in liquidio_push_packet()
[all …]
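
The lio_core.c hits show how receive-buffer exhaustion is handled: a deferred worker retries the ring refill and reschedules itself only while descriptors are still missing. A rough stand-alone sketch of that retry pattern (all names below are invented for illustration):

/*
 * Sketch of the refill-retry pattern visible in lio_core.c: keep
 * rescheduling the OOM worker while the ring still has empty slots.
 */
#include <stdio.h>

struct ring {
    int missing;    /* descriptors that still need a buffer */
};

/* pretend-refill: returns how many descriptors remain unfilled */
static int retry_refill(struct ring *r, int buffers_available)
{
    int filled = buffers_available < r->missing ? buffers_available : r->missing;

    r->missing -= filled;
    return r->missing;
}

static void oom_worker(struct ring *r, int buffers_available)
{
    if (retry_refill(r, buffers_available))
        printf("still %d short, reschedule the worker\n", r->missing);
    else
        printf("ring refilled, stop rescheduling\n");
}

int main(void)
{
    struct ring r = { .missing = 8 };

    oom_worker(&r, 5);  /* partial refill -> reschedule */
    oom_worker(&r, 5);  /* remaining 3 filled -> done */
    return 0;
}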

octeon_device.c
651 vfree(oct->droq[i]); in octeon_free_device_mem()
929 oct->droq[0] = vzalloc_node(sizeof(*oct->droq[0]), numa_node); in octeon_setup_output_queues()
930 if (!oct->droq[0]) in octeon_setup_output_queues()
931 oct->droq[0] = vzalloc(sizeof(*oct->droq[0])); in octeon_setup_output_queues()
932 if (!oct->droq[0]) in octeon_setup_output_queues()
936 vfree(oct->droq[oq_no]); in octeon_setup_output_queues()
937 oct->droq[oq_no] = NULL; in octeon_setup_output_queues()
1284 return oct->droq[q_no]->max_count; in octeon_get_rx_qsize()
1433 void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) in lio_enable_irq() argument
1440 if (droq) { in lio_enable_irq()
[all …]
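
octeon_setup_output_queues() above prefers a zeroed allocation on the device's NUMA node and falls back to an ordinary allocation if that fails. A small user-space sketch of the same fallback pattern (zalloc_on_node()/zalloc_any_node() are stand-ins for vzalloc_node()/vzalloc()):

/*
 * Sketch of the "node-local first, then anywhere" allocation fallback
 * used when setting up an output queue.
 */
#include <stdio.h>
#include <stdlib.h>

static void *zalloc_on_node(size_t size, int node)
{
    (void)size;
    (void)node;
    return NULL;            /* simulate node-local memory exhaustion */
}

static void *zalloc_any_node(size_t size)
{
    return calloc(1, size);
}

int main(void)
{
    size_t size = 256;      /* pretend sizeof(struct octeon_droq) */
    void *droq = zalloc_on_node(size, 0);

    if (!droq)
        droq = zalloc_any_node(size);   /* fallback, like plain vzalloc() */
    if (!droq) {
        fprintf(stderr, "output queue allocation failed\n");
        return 1;
    }
    printf("droq allocated\n");
    free(droq);
    return 0;
}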

octeon_droq.h
400 u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq);
406 struct octeon_droq *droq,
410 struct octeon_droq *droq, u32 budget);
414 int octeon_retry_droq_refill(struct octeon_droq *droq);

cn66xx_device.c
304 struct octeon_droq *droq = oct->droq[oq_no]; in lio_cn6xxx_setup_oq_regs() local
307 droq->desc_ring_dma); in lio_cn6xxx_setup_oq_regs()
308 octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count); in lio_cn6xxx_setup_oq_regs()
311 droq->buffer_size); in lio_cn6xxx_setup_oq_regs()
314 droq->pkts_sent_reg = in lio_cn6xxx_setup_oq_regs()
316 droq->pkts_credit_reg = in lio_cn6xxx_setup_oq_regs()
508 struct octeon_droq *droq; in lio_cn6xxx_process_droq_intr_regs() local
529 droq = oct->droq[oq_no]; in lio_cn6xxx_process_droq_intr_regs()
530 pkt_count = octeon_droq_check_hw_for_pkts(droq); in lio_cn6xxx_process_droq_intr_regs()
533 if (droq->ops.poll_mode) { in lio_cn6xxx_process_droq_intr_regs()
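
The OQ register setup in lio_cn6xxx_setup_oq_regs() (and its CN23XX PF/VF counterparts below) follows one pattern: program the ring's DMA base, descriptor count and buffer size, then cache pointers to the per-queue "packets sent" and "packet credit" registers for the datapath. A simplified sketch of that sequence, with invented register offsets and a plain array standing in for the CSR space:

/*
 * Sketch of the per-output-queue register programming pattern; the enum
 * offsets and the csr[] array are illustrative, not hardware layout.
 */
#include <stdint.h>
#include <stdio.h>

enum { OQ_BASE, OQ_SIZE, OQ_BUF_SIZE, OQ_PKTS_SENT, OQ_PKTS_CREDIT, OQ_REGS };

struct fake_oq {
    uint64_t desc_ring_dma;
    uint32_t max_count;
    uint32_t buffer_size;
    volatile uint64_t *pkts_sent_reg;
    volatile uint64_t *pkts_credit_reg;
};

static void setup_oq_regs(struct fake_oq *oq, volatile uint64_t csr[OQ_REGS])
{
    csr[OQ_BASE]     = oq->desc_ring_dma;   /* ring base address */
    csr[OQ_SIZE]     = oq->max_count;       /* number of descriptors */
    csr[OQ_BUF_SIZE] = oq->buffer_size;     /* size of each rx buffer */

    /* remember the status/doorbell registers for the fast path */
    oq->pkts_sent_reg   = &csr[OQ_PKTS_SENT];
    oq->pkts_credit_reg = &csr[OQ_PKTS_CREDIT];
}

int main(void)
{
    volatile uint64_t csr[OQ_REGS] = { 0 };
    struct fake_oq oq = { .desc_ring_dma = 0x1000, .max_count = 1024,
                          .buffer_size = 2048 };

    setup_oq_regs(&oq, csr);
    *oq.pkts_credit_reg = oq.max_count;     /* post initial credits */
    printf("credits posted: %llu\n", (unsigned long long)*oq.pkts_credit_reg);
    return 0;
}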

cn23xx_vf_device.c
247 struct octeon_droq *droq = oct->droq[oq_no]; in cn23xx_setup_vf_oq_regs() local
250 droq->desc_ring_dma); in cn23xx_setup_vf_oq_regs()
251 octeon_write_csr(oct, CN23XX_VF_SLI_OQ_SIZE(oq_no), droq->max_count); in cn23xx_setup_vf_oq_regs()
254 droq->buffer_size); in cn23xx_setup_vf_oq_regs()
257 droq->pkts_sent_reg = in cn23xx_setup_vf_oq_regs()
259 droq->pkts_credit_reg = in cn23xx_setup_vf_oq_regs()
491 struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; in cn23xx_vf_msix_interrupt_handler() local
496 pkts_sent = readq(droq->pkts_sent_reg); in cn23xx_vf_msix_interrupt_handler()

lio_ethtool.c
971 rx_pending = oct->droq[0]->max_count; in lio_ethtool_get_ringparam()
1215 writel(oct->droq[i]->max_count, in lio_reset_queues()
1216 oct->droq[i]->pkts_credit_reg); in lio_reset_queues()
1275 rx_count_old = oct->droq[0]->max_count; in lio_ethtool_set_ringparam()
1683 CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received); in lio_get_ethtool_stats()
1686 CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received); in lio_get_ethtool_stats()
1688 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem + in lio_get_ethtool_stats()
1689 oct_dev->droq[j]->stats.dropped_toomany + in lio_get_ethtool_stats()
1690 oct_dev->droq[j]->stats.rx_dropped); in lio_get_ethtool_stats()
1692 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); in lio_get_ethtool_stats()
[all …]
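
lio_get_ethtool_stats() reports a combined rx-drop figure by summing each queue's no-memory, too-many and generic drop counters. A trivial stand-alone sketch of that aggregation (the struct is a simplified stand-in for the driver's per-queue stats):

/*
 * Sketch of the per-queue stats aggregation: the reported drop count is
 * dropped_nomem + dropped_toomany + rx_dropped summed over all queues.
 */
#include <stdint.h>
#include <stdio.h>

struct oq_stats {
    uint64_t rx_pkts_received;
    uint64_t rx_bytes_received;
    uint64_t dropped_nomem;
    uint64_t dropped_toomany;
    uint64_t rx_dropped;
};

int main(void)
{
    struct oq_stats q[2] = {
        { 100, 64000, 1, 0, 2 },
        { 200, 96000, 0, 3, 1 },
    };
    uint64_t total_dropped = 0;

    for (int j = 0; j < 2; j++)
        total_dropped += q[j].dropped_nomem +
                         q[j].dropped_toomany +
                         q[j].rx_dropped;

    printf("total rx dropped: %llu\n", (unsigned long long)total_dropped);
    return 0;
}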

cn23xx_pf_device.c
631 struct octeon_droq *droq = oct->droq[oq_no]; in cn23xx_setup_oq_regs() local
639 droq->desc_ring_dma); in cn23xx_setup_oq_regs()
640 octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count); in cn23xx_setup_oq_regs()
643 droq->buffer_size); in cn23xx_setup_oq_regs()
646 droq->pkts_sent_reg = in cn23xx_setup_oq_regs()
648 droq->pkts_credit_reg = in cn23xx_setup_oq_regs()
948 struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; in cn23xx_pf_msix_interrupt_handler() local
952 if (!droq) { in cn23xx_pf_msix_interrupt_handler()
958 pkts_sent = readq(droq->pkts_sent_reg); in cn23xx_pf_msix_interrupt_handler()

octeon_mailbox.c
222 if (!oct->droq[i]) in get_vf_stats()
224 stats->rx_packets += oct->droq[i]->stats.rx_pkts_received; in get_vf_stats()
225 stats->rx_bytes += oct->droq[i]->stats.rx_bytes_received; in get_vf_stats()

octeon_main.h
75 struct octeon_droq *droq);

octeon_device.h
495 struct octeon_droq *droq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES]; member
901 void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq);

octeon_network.h
490 static inline void octeon_fast_packet_next(struct octeon_droq *droq, in octeon_fast_packet_next() argument
495 skb_put_data(nicbuf, get_rbd(droq->recv_buf_list[idx].buffer), in octeon_fast_packet_next()
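
octeon_fast_packet_next() appends the next receive buffer in the ring to the sk_buff being assembled. A simplified user-space sketch, with memcpy() standing in for skb_put_data() and invented stand-in types:

/*
 * Sketch of octeon_fast_packet_next(): copy the ring buffer at index idx
 * onto the end of the packet under construction.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct recv_buffer { uint8_t data[8]; };

struct fake_droq {
    struct recv_buffer recv_buf_list[4];
};

struct fake_skb {
    uint8_t data[64];
    size_t len;
};

static void fast_packet_next(struct fake_droq *droq, struct fake_skb *skb,
                             int copy_len, int idx)
{
    /* equivalent of skb_put_data(nicbuf, get_rbd(...buffer), copy_len) */
    memcpy(skb->data + skb->len, droq->recv_buf_list[idx].data, copy_len);
    skb->len += copy_len;
}

int main(void)
{
    struct fake_droq droq = { .recv_buf_list = { { "part1" }, { "part2" } } };
    struct fake_skb skb = { .len = 0 };

    fast_packet_next(&droq, &skb, 5, 0);
    fast_packet_next(&droq, &skb, 5, 1);
    printf("assembled %zu bytes: %s\n", skb.len, (char *)skb.data);
    return 0;
}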

lio_vf_main.c
88 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); in lio_wait_for_oq_pkts()
683 oct->droq[0]->ops.poll_mode = 0; in liquidio_destroy_nic_device()
921 oct->droq[0]->ops.poll_mode = 1; in liquidio_open()
983 oct->droq[0]->ops.poll_mode = 0; in liquidio_stop()
1200 oq_stats = &oct->droq[oq_no]->stats; in liquidio_get_stats64()
2399 writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg); in octeon_device_init()

lio_main.c
176 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], in octeon_droq_bh()
178 lio_enable_irq(oct->droq[q_no], NULL); in octeon_droq_bh()
211 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); in lio_wait_for_oq_pkts()
1271 oct->droq[0]->ops.poll_mode = 0; in liquidio_destroy_nic_device()
1829 oct->droq[0]->ops.poll_mode = 1; in liquidio_open()
1923 oct->droq[0]->ops.poll_mode = 0; in liquidio_stop()
2095 oq_stats = &oct->droq[oq_no]->stats; in liquidio_get_stats64()
4247 writel(octeon_dev->droq[j]->max_count, in octeon_device_init()
4248 octeon_dev->droq[j]->pkts_credit_reg); in octeon_device_init()
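
The bottom-half loop in octeon_droq_bh() gives each active output queue a packet budget, ORs together the "work remains" results to decide whether to reschedule, and re-enables the queue interrupt. A condensed stand-alone sketch of that control flow (helpers and constants below are illustrative, not driver API):

/*
 * Sketch of the droq bottom-half pattern: process each queue up to a
 * budget, remember whether any queue still has work, re-enable IRQs.
 */
#include <stdio.h>

#define NUM_QUEUES 4
#define BUDGET     64

struct fake_droq { int pending; };

/* process up to 'budget' packets; return nonzero if work remains */
static int process_packets(struct fake_droq *q, int budget)
{
    int done = q->pending < budget ? q->pending : budget;

    q->pending -= done;
    return q->pending > 0;
}

static void enable_irq(struct fake_droq *q) { (void)q; }

int main(void)
{
    struct fake_droq droq[NUM_QUEUES] = { {10}, {100}, {0}, {70} };
    int reschedule = 0;

    for (int q_no = 0; q_no < NUM_QUEUES; q_no++) {
        reschedule |= process_packets(&droq[q_no], BUDGET);
        enable_irq(&droq[q_no]);
    }
    printf("reschedule tasklet: %s\n", reschedule ? "yes" : "no");
    return 0;
}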