/Linux-v5.4/block/ |
D | blk-mq-cpumap.c |
      18  static int queue_index(struct blk_mq_queue_map *qmap,    in queue_index() argument
      21  return qmap->queue_offset + (q % nr_queues);    in queue_index()
      35  int blk_mq_map_queues(struct blk_mq_queue_map *qmap)    in blk_mq_map_queues() argument
      37  unsigned int *map = qmap->mq_map;    in blk_mq_map_queues()
      38  unsigned int nr_queues = qmap->nr_queues;    in blk_mq_map_queues()
      51  map[cpu] = queue_index(qmap, nr_queues, q++);    in blk_mq_map_queues()
      64  map[cpu] = queue_index(qmap, nr_queues, q++);    in blk_mq_map_queues()
      68  map[cpu] = queue_index(qmap, nr_queues, q++);    in blk_mq_map_queues()
      86  int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)    in blk_mq_hw_queue_to_node() argument
      91  if (index == qmap->mq_map[i])    in blk_mq_hw_queue_to_node()
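The matches above show the generic fallback mapping: each CPU index in qmap->mq_map is assigned queue_offset + (q % nr_queues). The following is a minimal user-space sketch of that round-robin arithmetic only; the CPU and queue counts are invented, and the real blk_mq_map_queues() additionally orders present CPUs first and tries to keep thread siblings on the same queue.

/* Model of the round-robin CPU-to-queue spread in blk-mq-cpumap.c. */
#include <stdio.h>

static unsigned int queue_index(unsigned int queue_offset,
                                unsigned int nr_queues, unsigned int q)
{
        return queue_offset + (q % nr_queues);
}

int main(void)
{
        unsigned int nr_cpus = 8, nr_queues = 3, queue_offset = 0;
        unsigned int mq_map[8];
        unsigned int q = 0;

        /* Walk CPUs in order and hand each one the next queue, wrapping. */
        for (unsigned int cpu = 0; cpu < nr_cpus; cpu++)
                mq_map[cpu] = queue_index(queue_offset, nr_queues, q++);

        for (unsigned int cpu = 0; cpu < nr_cpus; cpu++)
                printf("cpu %u -> hw queue %u\n", cpu, mq_map[cpu]);
        return 0;
}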
|
D | blk-mq-pci.c |
      26  int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,    in blk_mq_pci_map_queues() argument
      32  for (queue = 0; queue < qmap->nr_queues; queue++) {    in blk_mq_pci_map_queues()
      38  qmap->mq_map[cpu] = qmap->queue_offset + queue;    in blk_mq_pci_map_queues()
      44  WARN_ON_ONCE(qmap->nr_queues > 1);    in blk_mq_pci_map_queues()
      45  blk_mq_clear_mq_map(qmap);    in blk_mq_pci_map_queues()
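Here the map is driven by PCI MSI-X IRQ affinity: every CPU in a queue's affinity mask points at that queue, and if affinity information is missing the map is cleared so the caller falls back to the round-robin path. The sketch below models that flow in user space with an invented affinity table; it is not the kernel implementation.

/* Model of the affinity-driven mapping in blk-mq-pci.c. */
#include <stdio.h>

#define NR_CPUS   8
#define NR_QUEUES 4

int main(void)
{
        /* Pretend IRQ affinity: affinity[q][cpu] != 0 means cpu serves queue q. */
        const unsigned char affinity[NR_QUEUES][NR_CPUS] = {
                { 1, 1, 0, 0, 0, 0, 0, 0 },
                { 0, 0, 1, 1, 0, 0, 0, 0 },
                { 0, 0, 0, 0, 1, 1, 0, 0 },
                { 0, 0, 0, 0, 0, 0, 1, 1 },
        };
        unsigned int mq_map[NR_CPUS];
        unsigned int queue_offset = 0;

        /* Every CPU listed in a queue's mask is mapped to that queue. */
        for (unsigned int q = 0; q < NR_QUEUES; q++)
                for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
                        if (affinity[q][cpu])
                                mq_map[cpu] = queue_offset + q;

        for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu %u -> hw queue %u\n", cpu, mq_map[cpu]);
        return 0;
}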
|
D | blk-mq-virtio.c |
      24  int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,    in blk_mq_virtio_map_queues() argument
      33  for (queue = 0; queue < qmap->nr_queues; queue++) {    in blk_mq_virtio_map_queues()
      39  qmap->mq_map[cpu] = qmap->queue_offset + queue;    in blk_mq_virtio_map_queues()
      44  return blk_mq_map_queues(qmap);    in blk_mq_virtio_map_queues()
|
D | blk-mq.h |
      81  extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
     228  static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)    in blk_mq_clear_mq_map() argument
     233  qmap->mq_map[cpu] = 0;    in blk_mq_clear_mq_map()
|
/Linux-v5.4/include/linux/ |
D | blk-mq-pci.h | 8 int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
|
D | blk-mq-virtio.h | 8 int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
|
D | blk-mq.h | 339 int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
|
/Linux-v5.4/Documentation/ABI/testing/ |
D | sysfs-class-net-qmi |
      32  Write a number ranging from 1 to 254 to add a qmap mux
      50  created qmap mux based network device.
|
/Linux-v5.4/drivers/net/ethernet/broadcom/bnxt/ |
D | bnxt_dcb.c |
     178  unsigned long qmap = 0;    in bnxt_queue_remap() local
     187  __set_bit(j, &qmap);    in bnxt_queue_remap()
     199  j = find_next_zero_bit(&qmap, max, j);    in bnxt_queue_remap()
     201  __set_bit(j, &qmap);    in bnxt_queue_remap()
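In bnxt_queue_remap() the qmap is a plain bitmap of queue ids already claimed: taken ids are marked with __set_bit() and a free id is found with find_next_zero_bit(). Below is a small user-space model of that bookkeeping with hand-rolled stand-ins for the kernel bitmap helpers; the pre-claimed ids are arbitrary.

/* Model of the claimed-queue bitmap walk in bnxt_queue_remap(). */
#include <stdio.h>

static void set_bit_ul(unsigned int nr, unsigned long *map)
{
        *map |= 1UL << nr;
}

static unsigned int find_next_zero_bit_ul(unsigned long map, unsigned int max,
                                          unsigned int start)
{
        for (unsigned int i = start; i < max; i++)
                if (!(map & (1UL << i)))
                        return i;
        return max;             /* no free id found */
}

int main(void)
{
        unsigned long qmap = 0;
        unsigned int max = 8;

        set_bit_ul(0, &qmap);   /* ids 0 and 1 already in use */
        set_bit_ul(1, &qmap);

        unsigned int j = find_next_zero_bit_ul(qmap, max, 0);
        if (j < max) {
                set_bit_ul(j, &qmap);   /* claim the first free id */
                printf("remapped to queue id %u, qmap now 0x%lx\n", j, qmap);
        }
        return 0;
}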
|
/Linux-v5.4/arch/powerpc/kvm/ |
D | book3s_xive.h | 129 u8 qmap; member
|
D | book3s_xive.c |
     290  if (xive->qmap & (1 << prio))    in xive_check_provisioning()
     309  xive->qmap |= (1 << prio);    in xive_check_provisioning()
    1307  if (xive->qmap & (1 << i)) {    in kvmppc_xive_connect_vcpu()
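For XIVE the u8 qmap member is one bit per interrupt priority: a priority is provisioned only the first time its bit is found clear, and later callers (or a newly connected vcpu) just test the bit. A compact user-space sketch of that check-then-set pattern, with arbitrary priority values:

/* Model of the per-priority provisioning bitmap in book3s_xive.c. */
#include <stdio.h>

static unsigned char qmap;      /* u8: bit N set => priority N provisioned */

static void check_provisioning(unsigned int prio)
{
        if (qmap & (1 << prio)) {
                printf("prio %u already provisioned\n", prio);
                return;
        }
        /* ... in the kernel: allocate queues for this priority on every vcpu ... */
        qmap |= (1 << prio);
        printf("provisioned prio %u, qmap now 0x%02x\n", prio, qmap);
}

int main(void)
{
        check_provisioning(5);
        check_provisioning(5);  /* second call is a no-op */
        check_provisioning(6);
        return 0;
}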
|
/Linux-v5.4/drivers/net/ethernet/intel/ice/ |
D | ice_virtchnl_pf.c |
    2132  unsigned long qmap;    in ice_vc_cfg_irq_map_msg() local
    2184  qmap = map->rxq_map;    in ice_vc_cfg_irq_map_msg()
    2186  for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {    in ice_vc_cfg_irq_map_msg()
    2198  qmap = map->txq_map;    in ice_vc_cfg_irq_map_msg()
    2200  for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {    in ice_vc_cfg_irq_map_msg()
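Here qmap carries the VF's rxq_map/txq_map request: each set bit is a VSI queue id to attach to the interrupt vector, visited with for_each_set_bit(). The user-space loop below shows the same walk with an explicit bit test; the bitmask value and the MAX_QS_PER_VF stand-in are made up for illustration.

/* Model of the queue-bitmap walk in ice_vc_cfg_irq_map_msg(). */
#include <stdio.h>

#define MAX_QS_PER_VF 16        /* stand-in for ICE_MAX_BASE_QS_PER_VF */

int main(void)
{
        unsigned long qmap = 0x002d;    /* queues 0, 2, 3, 5 requested */

        for (unsigned int q = 0; q < MAX_QS_PER_VF; q++) {
                if (!(qmap & (1UL << q)))
                        continue;       /* same effect as for_each_set_bit() */
                printf("map vsi queue %u to this interrupt vector\n", q);
        }
        return 0;
}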
|
D | ice_lib.c |
     870  u16 offset = 0, qmap = 0, tx_count = 0;    in ice_vsi_setup_q_map() local
     945  qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &    in ice_vsi_setup_q_map()
     951  ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);    in ice_vsi_setup_q_map()
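In ice_vsi_setup_q_map() (and in the i40e entries further down) qmap is a packed 16-bit TC mapping word: a queue offset in one field and a queue-count field next to it, stored little-endian in the VSI context. The sketch below only illustrates that shift-and-mask packing; the field widths and shift values are assumptions, not the real ICE_AQ_VSI_TC_Q_* definitions, which live in ice_adminq_cmd.h.

/* Sketch of packing a tc_mapping word, with hypothetical field layout. */
#include <stdio.h>
#include <stdint.h>

#define TC_Q_OFFSET_S 0                          /* assumed: offset in low bits */
#define TC_Q_OFFSET_M (0x7ff << TC_Q_OFFSET_S)
#define TC_Q_NUM_S    11                         /* assumed: count field above it */
#define TC_Q_NUM_M    (0xf << TC_Q_NUM_S)

int main(void)
{
        uint16_t offset = 32;   /* first queue of this TC */
        uint16_t pow = 3;       /* log2(queue count), i.e. 8 queues */
        uint16_t qmap;

        qmap = ((offset << TC_Q_OFFSET_S) & TC_Q_OFFSET_M) |
               ((pow << TC_Q_NUM_S) & TC_Q_NUM_M);

        printf("tc_mapping word: 0x%04x\n", (unsigned int)qmap);
        return 0;
}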
|
/Linux-v5.4/drivers/scsi/ |
D | virtio_scsi.c |
     706  struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];    in virtscsi_map_queues() local
     708  return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);    in virtscsi_map_queues()
|
/Linux-v5.4/drivers/net/ethernet/intel/i40e/ |
D | i40e_main.c |
    1678  u16 qcount = 0, max_qcount, qmap, sections = 0;    in i40e_vsi_setup_queue_map_mqprio() local
    1694  qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |    in i40e_vsi_setup_queue_map_mqprio()
    1724  ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);    in i40e_vsi_setup_queue_map_mqprio()
    1773  u16 qmap;    in i40e_vsi_setup_queue_map() local
    1841  qmap =    in i40e_vsi_setup_queue_map()
    1855  qmap = 0;    in i40e_vsi_setup_queue_map()
    1857  ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);    in i40e_vsi_setup_queue_map()
    5800  u16 qcount, qmap, sections = 0;    in i40e_channel_setup_queue_map() local
    5815  qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |    in i40e_channel_setup_queue_map()
    5819  ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);    in i40e_channel_setup_queue_map()
    [all …]
|
/Linux-v5.4/drivers/net/ethernet/marvell/mvpp2/ |
D | mvpp2_main.c |
    1784  u32 qmap;    in mvpp2_egress_enable() local
    1789  qmap = 0;    in mvpp2_egress_enable()
    1794  qmap |= (1 << queue);    in mvpp2_egress_enable()
    1798  mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);    in mvpp2_egress_enable()
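In mvpp2_egress_enable() qmap is an enable mask: one bit per TX queue is set when the queue has descriptors, and the whole mask is written to the scheduler command register in a single store. A user-space model of building that mask, with invented per-queue state:

/* Model of the egress-enable mask built in mvpp2_egress_enable(). */
#include <stdio.h>

#define NR_TXQS 8

int main(void)
{
        const unsigned int txq_has_descs[NR_TXQS] = { 1, 1, 0, 1, 0, 0, 0, 0 };
        unsigned int qmap = 0;

        for (unsigned int queue = 0; queue < NR_TXQS; queue++)
                if (txq_has_descs[queue])
                        qmap |= (1 << queue);

        /* In the driver this value is written to MVPP2_TXP_SCHED_Q_CMD_REG. */
        printf("egress enable mask: 0x%02x\n", qmap);
        return 0;
}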
|
/Linux-v5.4/drivers/scsi/qla2xxx/ |
D | qla_os.c |
    7113  struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];    in qla2xxx_map_queues() local
    7116  rc = blk_mq_map_queues(qmap);    in qla2xxx_map_queues()
    7118  rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);    in qla2xxx_map_queues()
|