| /Linux-v5.4/block/ |
| D | blk-mq-cpumap.c |
      19    unsigned int nr_queues, const int q)    in queue_index() argument
      21    return qmap->queue_offset + (q % nr_queues);    in queue_index()
      38    unsigned int nr_queues = qmap->nr_queues;    in blk_mq_map_queues() local
      49    if (q >= nr_queues)    in blk_mq_map_queues()
      51    map[cpu] = queue_index(qmap, nr_queues, q++);    in blk_mq_map_queues()
      63    if (q < nr_queues) {    in blk_mq_map_queues()
      64    map[cpu] = queue_index(qmap, nr_queues, q++);    in blk_mq_map_queues()
      68    map[cpu] = queue_index(qmap, nr_queues, q++);    in blk_mq_map_queues()
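The blk-mq-cpumap.c hits above are the core software CPU-to-queue mapping:
hardware queues are handed out to CPUs round-robin, wrapping through the
modulo in queue_index(). A minimal user-space sketch of that pattern, with
simplified stand-in types (the real blk_mq_map_queues() also groups thread
siblings onto the same queue, which is omitted here):

    #include <stdio.h>

    struct qmap_sketch {
            unsigned int queue_offset;      /* first hardware queue index */
            unsigned int nr_queues;         /* number of hardware queues  */
    };

    /* Same arithmetic as the queue_index() hit at line 21. */
    static unsigned int queue_index(const struct qmap_sketch *qmap,
                                    unsigned int nr_queues, const int q)
    {
            return qmap->queue_offset + (q % nr_queues);
    }

    int main(void)
    {
            struct qmap_sketch qmap = { .queue_offset = 0, .nr_queues = 3 };
            unsigned int map[8];            /* pretend 8 possible CPUs */
            int q = 0;

            for (int cpu = 0; cpu < 8; cpu++)
                    map[cpu] = queue_index(&qmap, qmap.nr_queues, q++);
            for (int cpu = 0; cpu < 8; cpu++)
                    printf("cpu %d -> queue %u\n", cpu, map[cpu]);
            return 0;
    }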
|
| D | blk-mq-pci.c |
      32    for (queue = 0; queue < qmap->nr_queues; queue++) {    in blk_mq_pci_map_queues()
      44    WARN_ON_ONCE(qmap->nr_queues > 1);    in blk_mq_pci_map_queues()
|
| D | blk-mq-rdma.c |
      30    for (queue = 0; queue < map->nr_queues; queue++) {    in blk_mq_rdma_map_queues()
|
| D | blk-mq-virtio.c |
      33    for (queue = 0; queue < qmap->nr_queues; queue++) {    in blk_mq_virtio_map_queues()
|
| /Linux-v5.4/drivers/crypto/cavium/cpt/ |
| D | cptvf_main.c |
      42    if (cptvf->nr_queues) {    in init_worker_threads()
      44    cptvf->nr_queues);    in init_worker_threads()
      47    for (i = 0; i < cptvf->nr_queues; i++) {    in init_worker_threads()
      69    if (cptvf->nr_queues) {    in cleanup_worker_threads()
      71    cptvf->nr_queues);    in cleanup_worker_threads()
      74    for (i = 0; i < cptvf->nr_queues; i++)    in cleanup_worker_threads()
      100   pqinfo->nr_queues = 0;    in free_pending_queues()
      104   u32 nr_queues)    in alloc_pending_queues() argument
      111   pqinfo->nr_queues = nr_queues;    in alloc_pending_queues()
      139   static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)    in init_pending_queues() argument
      [all …]
|
| D | cptvf.h |
      85    u32 nr_queues; /* Number of queues supported */    member
      91    for (i = 0, q = &qinfo->queue[i]; i < qinfo->nr_queues; i++, \
      110   u32 nr_queues;    member
|
| D | cptvf_reqmanager.c |
      233   if (unlikely(qno >= cptvf->nr_queues)) {    in send_cpt_command()
      235   qno, cptvf->nr_queues);    in send_cpt_command()
      551   if (unlikely(qno > cptvf->nr_queues)) {    in vq_post_process()
|
| /Linux-v5.4/drivers/crypto/cavium/nitrox/ |
| D | nitrox_sriov.c |
      58    int nr_queues = 0;    in vf_mode_to_nr_queues() local
      62    nr_queues = MAX_PF_QUEUES;    in vf_mode_to_nr_queues()
      65    nr_queues = 8;    in vf_mode_to_nr_queues()
      68    nr_queues = 4;    in vf_mode_to_nr_queues()
      71    nr_queues = 2;    in vf_mode_to_nr_queues()
      74    nr_queues = 1;    in vf_mode_to_nr_queues()
      78    return nr_queues;    in vf_mode_to_nr_queues()
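vf_mode_to_nr_queues() above divides a fixed pool of hardware queues by
SR-IOV mode: the more virtual functions enabled, the fewer queues each
function gets. A hedged reconstruction of that switch; the enum names and
the pool size are illustrative placeholders, not the driver's definitions:

    #include <stdio.h>

    /* Placeholder mode names and pool size; illustrative only. */
    enum vf_mode_sketch { PF_MODE, VF16_MODE, VF32_MODE, VF64_MODE, VF128_MODE };
    #define MAX_PF_QUEUES_SKETCH 64         /* stand-in for MAX_PF_QUEUES */

    static int vf_mode_to_nr_queues(enum vf_mode_sketch mode)
    {
            int nr_queues = 0;

            switch (mode) {
            case PF_MODE:                   /* no VFs: PF keeps the pool */
                    nr_queues = MAX_PF_QUEUES_SKETCH;
                    break;
            case VF16_MODE:
                    nr_queues = 8;
                    break;
            case VF32_MODE:
                    nr_queues = 4;
                    break;
            case VF64_MODE:
                    nr_queues = 2;
                    break;
            case VF128_MODE:
                    nr_queues = 1;
                    break;
            }

            return nr_queues;
    }

    int main(void)
    {
            printf("VF32 mode: %d queues per function\n",
                   vf_mode_to_nr_queues(VF32_MODE));
            return 0;
    }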
|
| D | nitrox_lib.c |
      91    for (i = 0; i < ndev->nr_queues; i++) {    in nitrox_free_aqm_queues()
      102   for (i = 0; i < ndev->nr_queues; i++) {    in nitrox_alloc_aqm_queues()
      142   for (i = 0; i < ndev->nr_queues; i++) {    in nitrox_free_pktin_queues()
      155   ndev->pkt_inq = kcalloc_node(ndev->nr_queues,    in nitrox_alloc_pktin_queues()
      161   for (i = 0; i < ndev->nr_queues; i++) {    in nitrox_alloc_pktin_queues()
|
| D | nitrox_dev.h |
      145   int nr_queues;    member
      235   u16 nr_queues;    member
|
| D | nitrox_mbx.c |
      65    vfdev->nr_queues = vfdev->msg.data;    in pf2vf_send_response()
      73    vfdev->nr_queues = 0;    in pf2vf_send_response()
|
| D | nitrox_hal.c |
      123   for (i = 0; i < ndev->nr_queues; i++) {    in nitrox_config_pkt_input_rings()
      239   for (i = 0; i < ndev->nr_queues; i++)    in nitrox_config_pkt_solicit_ports()
      355   for (ring = 0; ring < ndev->nr_queues; ring++) {    in nitrox_config_aqm_rings()
|
| /Linux-v5.4/tools/perf/util/ |
| D | auxtrace.c |
      149   static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)    in auxtrace_alloc_queue_array() argument
      155   if (nr_queues > max_nr_queues)    in auxtrace_alloc_queue_array()
      158   queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));    in auxtrace_alloc_queue_array()
      162   for (i = 0; i < nr_queues; i++) {    in auxtrace_alloc_queue_array()
      172   queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;    in auxtrace_queues__init()
      173   queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);    in auxtrace_queues__init()
      182   unsigned int nr_queues = queues->nr_queues;    in auxtrace_queues__grow() local
      186   if (!nr_queues)    in auxtrace_queues__grow()
      187   nr_queues = AUXTRACE_INIT_NR_QUEUES;    in auxtrace_queues__grow()
      189   while (nr_queues && nr_queues < new_nr_queues)    in auxtrace_queues__grow()
      [all …]
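auxtrace_queues__grow() above starts from the current count (or the initial
AUXTRACE_INIT_NR_QUEUES when empty) and enlarges it until it covers
new_nr_queues. The listing cuts off at the loop condition, so the doubling
body below is an assumption about the usual shape of such a loop, not a
quote of the source:

    #include <stdio.h>

    #define AUXTRACE_INIT_NR_QUEUES 32      /* stand-in initial size */

    /* Keep doubling until the requested count fits; if the shift ever
     * overflows to zero, the nr_queues test ends the loop. */
    static unsigned int grow_nr_queues(unsigned int nr_queues,
                                       unsigned int new_nr_queues)
    {
            if (!nr_queues)
                    nr_queues = AUXTRACE_INIT_NR_QUEUES;
            while (nr_queues && nr_queues < new_nr_queues)
                    nr_queues <<= 1;        /* assumed growth step */
            return nr_queues;
    }

    int main(void)
    {
            /* 32 -> 64 -> 128 to cover a request for 100 queues */
            printf("%u\n", grow_nr_queues(32, 100));
            return 0;
    }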
|
| D | arm-spe.c |
      161   for (i = 0; i < queues->nr_queues; i++) {    in arm_spe_free_events()
|
| D | s390-cpumsf.c |
      202   if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu)    in s390_cpumcf_dumpctr()
      824   for (i = 0; i < sf->queues.nr_queues; i++) {    in s390_cpumsf_setup_queues()
      1018  for (i = 0; i < queues->nr_queues; i++) {    in s390_cpumsf_free_queues()
|
| D | intel-bts.c |
      211   for (i = 0; i < bts->queues.nr_queues; i++) {    in intel_bts_setup_queues()
      542   for (i = 0; i < queues->nr_queues; i++) {    in intel_bts_process_tid_exit()
      713   for (i = 0; i < queues->nr_queues; i++) {    in intel_bts_free_events()
|
| D | intel-pt.c |
      1125  for (i = 0; i < pt->queues.nr_queues; i++) {    in intel_pt_setup_queues()
      2075  for (i = 0; i < pt->queues.nr_queues; i++) {    in intel_pt_enable_sync_switch()
      2310  for (i = 0; i < queues->nr_queues; i++) {    in intel_pt_process_timeless_queues()
      2333  if (cpu < 0 || !pt->queues.nr_queues)    in intel_pt_cpu_to_ptq()
      2336  if ((unsigned)cpu >= pt->queues.nr_queues)    in intel_pt_cpu_to_ptq()
      2337  i = pt->queues.nr_queues - 1;    in intel_pt_cpu_to_ptq()
      2349  for (; j < pt->queues.nr_queues; j++) {    in intel_pt_cpu_to_ptq()
      2617  for (i = 0; i < queues->nr_queues; i++) {    in intel_pt_free_events()
|
| /Linux-v5.4/drivers/block/ |
| D | null_blk.h |
      87    unsigned int nr_queues;    member
|
| D | null_blk_main.c |
      1277  if (nullb->nr_queues != 1)    in nullb_to_queue()
      1278  index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);    in nullb_to_queue()
      1377  for (i = 0; i < nullb->nr_queues; i++)    in cleanup_queues()
      1460  nullb->nr_queues++;    in null_init_queues()
      1516  nullb->nr_queues++;    in init_driver_queues()
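nullb_to_queue() (line 1278) spreads CPUs over queues in contiguous blocks
rather than round-robin: the divisor is the ceiling of nr_cpu_ids /
nr_queues, so the first block of CPUs shares queue 0, the next block
queue 1, and so on. The same arithmetic, demonstrated:

    #include <stdio.h>

    /* Block size is ceil(nr_cpu_ids / nr_queues), computed with the
     * classic (a + b - 1) / b trick from the hit above. */
    static unsigned int cpu_to_queue(unsigned int cpu,
                                     unsigned int nr_cpu_ids,
                                     unsigned int nr_queues)
    {
            if (nr_queues == 1)
                    return 0;
            return cpu / ((nr_cpu_ids + nr_queues - 1) / nr_queues);
    }

    int main(void)
    {
            /* 8 CPUs over 3 queues: block size ceil(8/3) = 3, so CPUs
             * 0-2 -> queue 0, 3-5 -> queue 1, 6-7 -> queue 2. */
            for (unsigned int cpu = 0; cpu < 8; cpu++)
                    printf("cpu %u -> queue %u\n",
                           cpu, cpu_to_queue(cpu, 8, 3));
            return 0;
    }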
|
| /Linux-v5.4/drivers/char/ |
| D | virtio_console.c |
      1850  u32 i, j, nr_ports, nr_queues;    in init_vqs() local
      1854  nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;    in init_vqs()
      1856  vqs = kmalloc_array(nr_queues, sizeof(struct virtqueue *), GFP_KERNEL);    in init_vqs()
      1857  io_callbacks = kmalloc_array(nr_queues, sizeof(vq_callback_t *),    in init_vqs()
      1859  io_names = kmalloc_array(nr_queues, sizeof(char *), GFP_KERNEL);    in init_vqs()
      1897  err = virtio_find_vqs(portdev->vdev, nr_queues, vqs,    in init_vqs()
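The nr_queues formula at line 1854 falls out of virtio-console's layout:
every port needs an input and an output virtqueue, and a multiport device
adds one control pair on top, hence (nr_ports + 1) * 2; a single-port
device needs just the one in/out pair. Worked as a sketch:

    #include <stdbool.h>
    #include <stdio.h>

    /* Each port owns an in/out virtqueue pair; multiport devices add a
     * control pair, which is where the "+ 1" comes from. */
    static unsigned int console_nr_queues(bool multiport, unsigned int nr_ports)
    {
            return multiport ? (nr_ports + 1) * 2 : 2;
    }

    int main(void)
    {
            printf("1 port,  no multiport: %u vqs\n", console_nr_queues(false, 1));
            printf("3 ports, multiport:    %u vqs\n", console_nr_queues(true, 3));
            return 0;
    }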
|
| /Linux-v5.4/drivers/nvme/host/ |
| D | pci.c |
      441   map->nr_queues = dev->io_queues[i];    in nvme_pci_map_queues()
      442   if (!map->nr_queues) {    in nvme_pci_map_queues()
      456   qoff += map->nr_queues;    in nvme_pci_map_queues()
      457   offset += map->nr_queues;    in nvme_pci_map_queues()
      2236  int nr_queues = dev->online_queues - 1, sent = 0;    in __nvme_disable_io_queues() local
      2241  while (nr_queues > 0) {    in __nvme_disable_io_queues()
      2242  if (nvme_delete_queue(&dev->queues[nr_queues], opcode))    in __nvme_disable_io_queues()
      2244  nr_queues--;    in __nvme_disable_io_queues()
      2248  struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];    in __nvme_disable_io_queues()
      2261  if (nr_queues)    in __nvme_disable_io_queues()
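The __nvme_disable_io_queues() hits sketch a pipelined teardown: submit as
many asynchronous delete-queue commands as the device will accept, then
reap completions, going back to submit more until nr_queues reaches zero.
A simplified, self-contained sketch of that control flow; the stub
functions stand in for nvme_delete_queue() and the completion wait, and
always succeed here:

    #include <stdbool.h>
    #include <stdio.h>

    static bool submit_delete(int qid)
    {
            printf("delete for queue %d sent\n", qid);
            return true;    /* stub: the real submit can be refused */
    }

    static bool wait_delete_done(int qid)
    {
            printf("queue %d deleted\n", qid);
            return true;    /* stub: the real wait can time out */
    }

    static bool disable_io_queues(int online_queues)
    {
            int nr_queues = online_queues - 1, sent = 0;

    retry:
            while (nr_queues > 0) {
                    if (!submit_delete(nr_queues))
                            break;          /* stop submitting for now */
                    nr_queues--;
                    sent++;
            }
            while (sent) {
                    if (!wait_delete_done(nr_queues + sent))
                            return false;   /* timed out */
                    sent--;
                    if (nr_queues)
                            goto retry;     /* queues still pending */
            }
            return true;
    }

    int main(void)
    {
            return disable_io_queues(4) ? 0 : 1;
    }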
|
| D | rdma.c |
      1826  set->map[HCTX_TYPE_DEFAULT].nr_queues =    in nvme_rdma_map_queues()
      1829  set->map[HCTX_TYPE_READ].nr_queues =    in nvme_rdma_map_queues()
      1835  set->map[HCTX_TYPE_DEFAULT].nr_queues =    in nvme_rdma_map_queues()
      1838  set->map[HCTX_TYPE_READ].nr_queues =    in nvme_rdma_map_queues()
      1849  set->map[HCTX_TYPE_POLL].nr_queues =    in nvme_rdma_map_queues()
|
| D | tcp.c |
      2179  set->map[HCTX_TYPE_DEFAULT].nr_queues =    in nvme_tcp_map_queues()
      2182  set->map[HCTX_TYPE_READ].nr_queues =    in nvme_tcp_map_queues()
      2188  set->map[HCTX_TYPE_DEFAULT].nr_queues =    in nvme_tcp_map_queues()
      2191  set->map[HCTX_TYPE_READ].nr_queues =    in nvme_tcp_map_queues()
      2200  set->map[HCTX_TYPE_POLL].nr_queues =    in nvme_tcp_map_queues()
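nvme_rdma_map_queues() and nvme_tcp_map_queues() above fill the tag set's
per-type maps the same way: with dedicated write queues configured,
HCTX_TYPE_DEFAULT (writes) and HCTX_TYPE_READ get separate counts;
otherwise both types share one set of queues, and HCTX_TYPE_POLL gets
whatever was reserved for polling. A hedged sketch of that split; the
option fields are illustrative and the drivers' queue_offset bookkeeping
is omitted:

    #include <stdio.h>

    enum { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

    struct opts_sketch {                    /* illustrative fields */
            unsigned int nr_io_queues;      /* read/write queues */
            unsigned int nr_write_queues;   /* dedicated write queues, may be 0 */
            unsigned int nr_poll_queues;    /* polled queues, may be 0 */
    };

    static void map_queues(unsigned int map[HCTX_MAX_TYPES],
                           const struct opts_sketch *opts)
    {
            if (opts->nr_write_queues) {
                    /* Separate write queues: writes go to DEFAULT,
                     * reads to their own map. */
                    map[HCTX_TYPE_DEFAULT] = opts->nr_write_queues;
                    map[HCTX_TYPE_READ] = opts->nr_io_queues;
            } else {
                    /* Shared: both types use the same queues. */
                    map[HCTX_TYPE_DEFAULT] = opts->nr_io_queues;
                    map[HCTX_TYPE_READ] = opts->nr_io_queues;
            }
            map[HCTX_TYPE_POLL] = opts->nr_poll_queues;
    }

    int main(void)
    {
            unsigned int map[HCTX_MAX_TYPES];
            struct opts_sketch opts = { .nr_io_queues = 4,
                                        .nr_write_queues = 2,
                                        .nr_poll_queues = 1 };

            map_queues(map, &opts);
            printf("default=%u read=%u poll=%u\n",
                   map[HCTX_TYPE_DEFAULT], map[HCTX_TYPE_READ],
                   map[HCTX_TYPE_POLL]);
            return 0;
    }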
|
| /Linux-v5.4/drivers/s390/cio/ |
| D | qdio_setup.c |
      137   static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)    in __qdio_allocate_qs() argument
      142   for (i = 0; i < nr_queues; i++) {    in __qdio_allocate_qs()
|
| /Linux-v5.4/include/linux/ |
| D | blk-mq.h |
      81    unsigned int nr_queues;    member
|