
Searched refs:queue_num (Results 1 – 25 of 30) sorted by relevance


/Linux-v4.19/drivers/net/ethernet/samsung/sxgbe/
sxgbe_mtl.c 64 static void sxgbe_mtl_set_txfifosize(void __iomem *ioaddr, int queue_num, in sxgbe_mtl_set_txfifosize() argument
71 reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num)); in sxgbe_mtl_set_txfifosize()
73 writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num)); in sxgbe_mtl_set_txfifosize()
76 static void sxgbe_mtl_set_rxfifosize(void __iomem *ioaddr, int queue_num, in sxgbe_mtl_set_rxfifosize() argument
83 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); in sxgbe_mtl_set_rxfifosize()
85 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); in sxgbe_mtl_set_rxfifosize()
88 static void sxgbe_mtl_enable_txqueue(void __iomem *ioaddr, int queue_num) in sxgbe_mtl_enable_txqueue() argument
92 reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num)); in sxgbe_mtl_enable_txqueue()
94 writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num)); in sxgbe_mtl_enable_txqueue()
97 static void sxgbe_mtl_disable_txqueue(void __iomem *ioaddr, int queue_num) in sxgbe_mtl_disable_txqueue() argument
[all …]
sxgbe_mtl.h 67 void (*mtl_set_txfifosize)(void __iomem *ioaddr, int queue_num,
70 void (*mtl_set_rxfifosize)(void __iomem *ioaddr, int queue_num,
73 void (*mtl_enable_txqueue)(void __iomem *ioaddr, int queue_num);
75 void (*mtl_disable_txqueue)(void __iomem *ioaddr, int queue_num);
77 void (*set_tx_mtl_mode)(void __iomem *ioaddr, int queue_num,
80 void (*set_rx_mtl_mode)(void __iomem *ioaddr, int queue_num,
85 void (*mtl_fc_active)(void __iomem *ioaddr, int queue_num,
88 void (*mtl_fc_deactive)(void __iomem *ioaddr, int queue_num,
91 void (*mtl_fc_enable)(void __iomem *ioaddr, int queue_num);
93 void (*mtl_fep_enable)(void __iomem *ioaddr, int queue_num);
[all …]
sxgbe_main.c 561 int queue_num, ret; in init_dma_desc_rings() local
567 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { in init_dma_desc_rings()
568 ret = init_tx_ring(priv->device, queue_num, in init_dma_desc_rings()
569 priv->txq[queue_num], tx_rsize); in init_dma_desc_rings()
578 priv->txq[queue_num]->priv_ptr = priv; in init_dma_desc_rings()
582 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { in init_dma_desc_rings()
583 ret = init_rx_ring(netd, queue_num, in init_dma_desc_rings()
584 priv->rxq[queue_num], rx_rsize); in init_dma_desc_rings()
593 priv->rxq[queue_num]->priv_ptr = priv; in init_dma_desc_rings()
601 while (queue_num--) in init_dma_desc_rings()
[all …]
sxgbe_common.h 197 #define SXGBE_FOR_EACH_QUEUE(max_queues, queue_num) \ argument
198 for (queue_num = 0; queue_num < max_queues; queue_num++)
361 void (*enable_rxqueue)(void __iomem *ioaddr, int queue_num);
362 void (*disable_rxqueue)(void __iomem *ioaddr, int queue_num);
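The SXGBE_FOR_EACH_QUEUE macro shown above is just a counting loop; init_dma_desc_rings() in sxgbe_main.c uses it to walk every TX and RX ring, and the trailing "while (queue_num--)" in its error path unwinds only the rings set up so far. Below is a minimal standalone C sketch of that iterate-then-unwind pattern; the queue count and the ring-init helper are placeholders, not the driver's real symbols.

    #include <stdio.h>

    /* Mirrors the macro from sxgbe_common.h: plain 0..max-1 iteration. */
    #define SXGBE_FOR_EACH_QUEUE(max_queues, queue_num) \
            for (queue_num = 0; queue_num < max_queues; queue_num++)

    #define DEMO_TX_QUEUES 4   /* placeholder count, not SXGBE_TX_QUEUES */

    /* Placeholder "ring init" that fails on queue 2 to exercise the unwind. */
    static int demo_init_ring(int queue_num)
    {
            if (queue_num == 2)
                    return -1;
            printf("init ring %d\n", queue_num);
            return 0;
    }

    int main(void)
    {
            int queue_num;

            SXGBE_FOR_EACH_QUEUE(DEMO_TX_QUEUES, queue_num) {
                    if (demo_init_ring(queue_num) < 0)
                            goto err;
            }
            return 0;

    err:
            /* Unwind in reverse order, as the driver's error path does. */
            while (queue_num--)
                    printf("free ring %d\n", queue_num);
            return 1;
    }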
sxgbe_core.c 168 static void sxgbe_core_enable_rxqueue(void __iomem *ioaddr, int queue_num) in sxgbe_core_enable_rxqueue() argument
173 reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num); in sxgbe_core_enable_rxqueue()
178 static void sxgbe_core_disable_rxqueue(void __iomem *ioaddr, int queue_num) in sxgbe_core_disable_rxqueue() argument
183 reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num); in sxgbe_core_disable_rxqueue()
/Linux-v4.19/samples/pktgen/
pktgen_sample06_numa_awared_queue_irq_affinity.sh 58 queue_num=$i
59 info "queue number is $queue_num"
60 pg_set $dev "queue_map_min $queue_num"
61 pg_set $dev "queue_map_max $queue_num"
/Linux-v4.19/net/netfilter/
nfnetlink_queue.c 72 u_int16_t queue_num; /* number of this queue */ member
100 static inline u_int8_t instance_hashfn(u_int16_t queue_num) in instance_hashfn() argument
102 return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS; in instance_hashfn()
106 instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num) in instance_lookup() argument
111 head = &q->instance_table[instance_hashfn(queue_num)]; in instance_lookup()
113 if (inst->queue_num == queue_num) in instance_lookup()
120 instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid) in instance_create() argument
127 if (instance_lookup(q, queue_num)) { in instance_create()
138 inst->queue_num = queue_num; in instance_create()
151 h = instance_hashfn(queue_num); in instance_create()
[all …]
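nfnetlink_queue.c keys each queue instance by its 16-bit queue_num and spreads instances across a small hash table via ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS before walking the bucket for an exact match. The following userspace C sketch shows that lookup pattern only; the bucket count and the singly linked list are simplified stand-ins for the kernel's hlist machinery.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define INSTANCE_BUCKETS 16   /* stand-in for the kernel's bucket count */

    struct demo_instance {
            uint16_t queue_num;
            struct demo_instance *next;
    };

    static struct demo_instance *instance_table[INSTANCE_BUCKETS];

    /* Same mixing as nfnetlink_queue.c: fold the high byte into the low one. */
    static unsigned int instance_hashfn(uint16_t queue_num)
    {
            return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS;
    }

    static struct demo_instance *instance_lookup(uint16_t queue_num)
    {
            struct demo_instance *inst;

            for (inst = instance_table[instance_hashfn(queue_num)]; inst; inst = inst->next)
                    if (inst->queue_num == queue_num)
                            return inst;
            return NULL;
    }

    static struct demo_instance *instance_create(uint16_t queue_num)
    {
            struct demo_instance *inst;

            if (instance_lookup(queue_num))   /* refuse duplicates, as the kernel does */
                    return NULL;

            inst = calloc(1, sizeof(*inst));
            if (!inst)
                    return NULL;
            inst->queue_num = queue_num;
            inst->next = instance_table[instance_hashfn(queue_num)];
            instance_table[instance_hashfn(queue_num)] = inst;
            return inst;
    }

    int main(void)
    {
            instance_create(0x0100);
            printf("lookup 0x0100: %s\n", instance_lookup(0x0100) ? "found" : "missing");
            printf("lookup 0x0200: %s\n", instance_lookup(0x0200) ? "found" : "missing");
            return 0;
    }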
nfnetlink_cthelper.c 66 return NF_QUEUE_NR(helper->queue_num) | NF_VERDICT_FLAG_QUEUE_BYPASS; in nfnl_userspace_cthelper()
256 helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM])); in nfnl_cthelper_create()
394 helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM])); in nfnl_cthelper_update()
547 if (nla_put_be32(skb, NFCTH_QUEUE_NUM, htonl(helper->queue_num))) in nfnl_cthelper_fill_info()
/Linux-v4.19/drivers/net/dsa/
bcm_sf2_cfp.c 210 unsigned int queue_num, in bcm_sf2_cfp_act_pol_set() argument
222 CHANGE_TC | queue_num << NEW_TC_SHIFT; in bcm_sf2_cfp_act_pol_set()
326 unsigned int queue_num, in bcm_sf2_cfp_ipv4_rule_set() argument
428 queue_num, true); in bcm_sf2_cfp_ipv4_rule_set()
520 unsigned int queue_num, in bcm_sf2_cfp_ipv6_rule_set() argument
646 queue_num, false); in bcm_sf2_cfp_ipv6_rule_set()
708 queue_num, true); in bcm_sf2_cfp_ipv6_rule_set()
737 unsigned int queue_num, port_num; in bcm_sf2_cfp_rule_set() local
774 queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES; in bcm_sf2_cfp_rule_set()
782 queue_num, fs); in bcm_sf2_cfp_rule_set()
[all …]
/Linux-v4.19/drivers/net/ethernet/hisilicon/hns3/
hns3_ethtool.c 528 int queue_num = h->kinfo.num_tqps; in hns3_get_ringparam() local
534 param->rx_pending = priv->ring_data[queue_num].ring->desc_num; in hns3_get_ringparam()
834 u16 queue_num = h->kinfo.num_tqps; in hns3_get_coalesce_per_queue() local
836 if (queue >= queue_num) { in hns3_get_coalesce_per_queue()
839 queue, queue_num - 1); in hns3_get_coalesce_per_queue()
844 rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector; in hns3_get_coalesce_per_queue()
967 int queue_num = h->kinfo.num_tqps; in hns3_set_coalesce_per_queue() local
970 rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector; in hns3_set_coalesce_per_queue()
993 u16 queue_num = h->kinfo.num_tqps; in hns3_set_coalesce() local
1004 for (i = 0; i < queue_num; i++) in hns3_set_coalesce()
hns3_enet.c 1257 int queue_num = priv->ae_handle->kinfo.num_tqps; in hns3_nic_get_stats64() local
1274 for (idx = 0; idx < queue_num; idx++) { in hns3_nic_get_stats64()
1286 ring = priv->ring_data[idx + queue_num].ring; in hns3_nic_get_stats64()
2801 int queue_num = priv->ae_handle->kinfo.num_tqps; in hns3_ring_get_cfg() local
2814 ring_data[q->tqp_index + queue_num].ring = ring; in hns3_ring_get_cfg()
2815 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; in hns3_ring_get_cfg()
/Linux-v4.19/block/
blk-mq-tag.c 431 new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, in blk_mq_tag_update_depth()
435 ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth); in blk_mq_tag_update_depth()
441 blk_mq_free_rqs(set, *tagsptr, hctx->queue_num); in blk_mq_tag_update_depth()
476 hwq = hctx->queue_num; in blk_mq_unique_tag()
blk-mq-sysfs.c 238 ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); in blk_mq_register_hctx()
blk-mq.c 1669 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false); in request_to_qc_t()
1671 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true); in request_to_qc_t()
2540 hctxs[i]->queue_num = i; in blk_mq_realloc_hw_ctxs()
/Linux-v4.19/drivers/soc/ti/
knav_qmss_acc.c 292 cmd->queue_num, cmd->timer_config); in knav_acc_write()
295 writel_relaxed(cmd->queue_num, &pdsp->acc_command->queue_num); in knav_acc_write()
332 cmd->queue_num = info->list_entries << 16; in knav_acc_setup_cmd()
333 cmd->queue_num |= queue_base; in knav_acc_setup_cmd()
knav_qmss.h 99 u32 queue_num; member
/Linux-v4.19/drivers/net/wireless/broadcom/b43legacy/
main.c 2482 int queue_num; in b43legacy_tx_work() local
2492 for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) { in b43legacy_tx_work()
2493 while (skb_queue_len(&wl->tx_queue[queue_num])) { in b43legacy_tx_work()
2494 skb = skb_dequeue(&wl->tx_queue[queue_num]); in b43legacy_tx_work()
2500 wl->tx_queue_stopped[queue_num] = 1; in b43legacy_tx_work()
2501 ieee80211_stop_queue(wl->hw, queue_num); in b43legacy_tx_work()
2502 skb_queue_head(&wl->tx_queue[queue_num], skb); in b43legacy_tx_work()
2511 wl->tx_queue_stopped[queue_num] = 0; in b43legacy_tx_work()
2949 int queue_num; in b43legacy_wireless_core_stop() local
2973 for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) { in b43legacy_wireless_core_stop()
[all …]
/Linux-v4.19/include/linux/
blk_types.h 427 static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num, in blk_tag_to_qc_t() argument
430 blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT); in blk_tag_to_qc_t()
blk-mq.h 56 unsigned int queue_num; member
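blk_tag_to_qc_t() in blk_types.h packs a request tag and the hardware queue index (hctx->queue_num from blk-mq.h) into a single blk_qc_t cookie, with the queue number in the upper bits, and request_to_qc_t() in blk-mq.c is one of its callers. A minimal C sketch of that encode/decode follows; the shift width and the "internal tag" flag bit are local stand-in constants, not the kernel's definitions.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t demo_qc_t;

    #define DEMO_QC_T_SHIFT    16          /* queue number lives above bit 15 */
    #define DEMO_QC_T_INTERNAL (1U << 31)  /* marks a scheduler-internal tag */

    /* Pack tag + queue number, mirroring the blk_tag_to_qc_t() shape above. */
    static demo_qc_t demo_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
                                      bool internal)
    {
            demo_qc_t ret = tag | (queue_num << DEMO_QC_T_SHIFT);

            if (internal)
                    ret |= DEMO_QC_T_INTERNAL;
            return ret;
    }

    static unsigned int demo_qc_t_to_queue_num(demo_qc_t cookie)
    {
            return (cookie & ~DEMO_QC_T_INTERNAL) >> DEMO_QC_T_SHIFT;
    }

    static unsigned int demo_qc_t_to_tag(demo_qc_t cookie)
    {
            return cookie & ((1U << DEMO_QC_T_SHIFT) - 1);
    }

    int main(void)
    {
            demo_qc_t c = demo_tag_to_qc_t(42, 3, false);

            assert(demo_qc_t_to_queue_num(c) == 3);
            assert(demo_qc_t_to_tag(c) == 42);
            return 0;
    }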
/Linux-v4.19/drivers/net/ethernet/chelsio/cxgb3/
cxgb3_ioctl.h 70 uint32_t queue_num; member
/Linux-v4.19/include/net/netfilter/
nf_conntrack_helper.h 54 unsigned int queue_num; member
/Linux-v4.19/drivers/net/wireless/broadcom/b43/
main.c 3610 int queue_num; in b43_tx_work() local
3620 for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) { in b43_tx_work()
3621 while (skb_queue_len(&wl->tx_queue[queue_num])) { in b43_tx_work()
3622 skb = skb_dequeue(&wl->tx_queue[queue_num]); in b43_tx_work()
3628 wl->tx_queue_stopped[queue_num] = 1; in b43_tx_work()
3629 ieee80211_stop_queue(wl->hw, queue_num); in b43_tx_work()
3630 skb_queue_head(&wl->tx_queue[queue_num], skb); in b43_tx_work()
3639 wl->tx_queue_stopped[queue_num] = 0; in b43_tx_work()
4349 int queue_num; in b43_wireless_core_stop() local
4402 for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) { in b43_wireless_core_stop()
[all …]
/Linux-v4.19/drivers/net/ethernet/ibm/
ibmvnic.c 1434 int queue_num = skb_get_queue_mapping(skb); in ibmvnic_xmit() local
1459 netif_stop_subqueue(netdev, queue_num); in ibmvnic_xmit()
1475 tx_pool = &adapter->tso_pool[queue_num]; in ibmvnic_xmit()
1477 tx_pool = &adapter->tx_pool[queue_num]; in ibmvnic_xmit()
1479 tx_scrq = adapter->tx_scrq[queue_num]; in ibmvnic_xmit()
1529 tx_buff->pool_index = queue_num; in ibmvnic_xmit()
1594 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], in ibmvnic_xmit()
1599 lpar_rc = send_subcrq(adapter, handle_array[queue_num], in ibmvnic_xmit()
1626 netdev_dbg(netdev, "Stopping queue %d\n", queue_num); in ibmvnic_xmit()
1627 netif_stop_subqueue(netdev, queue_num); in ibmvnic_xmit()
[all …]
/Linux-v4.19/drivers/net/wireless/st/cw1200/
wsm.c 1669 int queue_num; in wsm_get_tx() local
1696 queue_num = queue - priv->tx_queue; in wsm_get_tx()
1733 if (priv->edca.params[queue_num].txop_limit) in wsm_get_tx()
1741 priv->tx_burst_idx = queue_num; in wsm_get_tx()
/Linux-v4.19/drivers/dma/ti/
cppi41.c 292 static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) in cppi41_pop_desc() argument
296 desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); in cppi41_pop_desc()
