/Linux-v5.15/net/sched/

sch_multiq.c
     25  struct Qdisc **queues;  (member)
     54  return q->queues[0];  (in multiq_classify())
     56  return q->queues[band];  (in multiq_classify())
    105  qdisc = q->queues[q->curband];  (in multiq_dequeue())
    137  qdisc = q->queues[curband];  (in multiq_peek())
    154  qdisc_reset(q->queues[band]);  (in multiq_reset())
    167  qdisc_put(q->queues[band]);  (in multiq_destroy())
    169  kfree(q->queues);  (in multiq_destroy())
    197  if (q->queues[i] != &noop_qdisc) {  (in multiq_tune())
    198  struct Qdisc *child = q->queues[i];  (in multiq_tune())
    [all …]

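The matches trace sch_multiq's whole life cycle: a heap-allocated array of per-band child Qdiscs, classification by band, round-robin dequeue driven by a saved curband cursor, and teardown that resets, puts, and frees each slot. A minimal, self-contained sketch of the cursor logic; the type and function names below are hypothetical stand-ins, not the kernel API:

    #include <stddef.h>

    struct queue { int len; };          /* stand-in for a child Qdisc */

    struct multiq {
        struct queue **queues;          /* one child per band */
        unsigned int bands;
        unsigned int curband;           /* round-robin cursor */
    };

    /* Try each band once, starting just past the band served last. */
    static struct queue *multiq_pick(struct multiq *q)
    {
        for (unsigned int i = 0; i < q->bands; i++) {
            q->curband = (q->curband + 1) % q->bands;
            if (q->queues[q->curband]->len > 0)
                return q->queues[q->curband];
        }
        return NULL;                    /* every band is empty */
    }

Advancing the cursor before testing each band means the band served last is retried last on the next call, which is what keeps the scheduling fair.
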
sch_prio.c
     26  struct Qdisc *queues[TCQ_PRIO_BANDS];  (member)
     57  return q->queues[q->prio2band[band & TC_PRIO_MAX]];  (in prio_classify())
     63  return q->queues[q->prio2band[0]];  (in prio_classify())
     65  return q->queues[band];  (in prio_classify())
    103  struct Qdisc *qdisc = q->queues[prio];  (in prio_peek())
    117  struct Qdisc *qdisc = q->queues[prio];  (in prio_dequeue())
    137  qdisc_reset(q->queues[prio]);  (in prio_reset())
    175  qdisc_put(q->queues[prio]);  (in prio_destroy())
    182  struct Qdisc *queues[TCQ_PRIO_BANDS];  (in prio_tune(), local)
    200  queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,  (in prio_tune())
    [all …]

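sch_prio keeps the same array-of-children shape but picks differently: prio2band[] maps a packet priority to a band, and dequeue always scans from band 0 upward, so lower-numbered bands strictly preempt higher ones. A compilable sketch of both steps, with made-up band counts and stand-in types (prio2band entries are assumed to be below BANDS):

    #include <stddef.h>

    #define BANDS    3
    #define PRIO_MAX 15                 /* stands in for TC_PRIO_MAX */

    struct queue { int len; };

    struct prio {
        struct queue *queues[BANDS];    /* fixed array, as in sch_prio */
        unsigned char prio2band[PRIO_MAX + 1]; /* each entry < BANDS */
    };

    /* Classify: map a packet priority to a band, as prio_classify() does. */
    static struct queue *prio_classify(struct prio *q, unsigned int priority)
    {
        return q->queues[q->prio2band[priority & PRIO_MAX]];
    }

    /* Dequeue: always serve the lowest non-empty band first. */
    static struct queue *prio_pick(struct prio *q)
    {
        for (int band = 0; band < BANDS; band++)
            if (q->queues[band]->len > 0)
                return q->queues[band];
        return NULL;
    }

The contrast with the round-robin sketch above is essentially the whole difference between the two qdiscs.
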
/Linux-v5.15/drivers/staging/wfx/

queue.c
    234  struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)];  (in wfx_tx_queues_get_skb(), local)
    244  WARN_ON(num_queues >= ARRAY_SIZE(queues));  (in wfx_tx_queues_get_skb())
    245  queues[num_queues] = &wvif->tx_queue[i];  (in wfx_tx_queues_get_skb())
    247  if (wfx_tx_queue_get_weight(queues[j]) <  (in wfx_tx_queues_get_skb())
    248  wfx_tx_queue_get_weight(queues[j - 1]))  (in wfx_tx_queues_get_skb())
    249  swap(queues[j - 1], queues[j]);  (in wfx_tx_queues_get_skb())
    259  skb = skb_dequeue(&queues[i]->cab);  (in wfx_tx_queues_get_skb())
    267  WARN_ON(queues[i] !=  (in wfx_tx_queues_get_skb())
    269  atomic_inc(&queues[i]->pending_frames);  (in wfx_tx_queues_get_skb())
    270  trace_queues_stats(wdev, queues[i]);  (in wfx_tx_queues_get_skb())
    [all …]

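Lines 247-249 are an insertion sort done in place: the driver collects candidate TX queues into a local pointer array, then bubbles lighter-weight queues toward the front so they are served first. The same loop shape in plain C; struct txq and the swap helper are invented for the sketch (the kernel uses its swap() macro):

    #include <stddef.h>

    struct txq { unsigned int weight; };

    static void swap_ptr(struct txq **a, struct txq **b)
    {
        struct txq *t = *a; *a = *b; *b = t;
    }

    /* Insertion-sort a small pointer array so the lightest queue comes
     * first, mirroring the j / j-1 loop in wfx_tx_queues_get_skb(). */
    static void sort_by_weight(struct txq **queues, size_t n)
    {
        for (size_t i = 1; i < n; i++)
            for (size_t j = i; j > 0 && queues[j]->weight <
                                        queues[j - 1]->weight; j--)
                swap_ptr(&queues[j - 1], &queues[j]);
    }
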
/Linux-v5.15/drivers/scsi/aacraid/

comminit.c
    373  struct aac_entry * queues;  (in aac_comm_init(), local)
    375  struct aac_queue_block * comm = dev->queues;  (in aac_comm_init())
    394  queues = (struct aac_entry *)(((ulong)headers) + hdrsize);  (in aac_comm_init())
    397  comm->queue[HostNormCmdQueue].base = queues;  (in aac_comm_init())
    399  queues += HOST_NORM_CMD_ENTRIES;  (in aac_comm_init())
    403  comm->queue[HostHighCmdQueue].base = queues;  (in aac_comm_init())
    406  queues += HOST_HIGH_CMD_ENTRIES;  (in aac_comm_init())
    410  comm->queue[AdapNormCmdQueue].base = queues;  (in aac_comm_init())
    413  queues += ADAP_NORM_CMD_ENTRIES;  (in aac_comm_init())
    417  comm->queue[AdapHighCmdQueue].base = queues;  (in aac_comm_init())
    [all …]

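aac_comm_init() carves every queue out of one contiguous buffer: record the cursor as a queue's base, advance it by that queue's entry count, repeat. A miniature of the pattern with just two queues; the struct names and entry counts here are made up:

    #include <stdlib.h>

    struct entry { unsigned int data[4]; }; /* stand-in queue entry */

    #define NORM_CMD_ENTRIES 8
    #define HIGH_CMD_ENTRIES 4

    struct queue_block {
        struct entry *norm_base;
        struct entry *high_base;
    };

    /* One allocation, then advance a cursor to hand out each queue's
     * slice. norm_base doubles as the pointer to free later. */
    static int init_queues(struct queue_block *comm)
    {
        struct entry *queues =
            calloc(NORM_CMD_ENTRIES + HIGH_CMD_ENTRIES, sizeof(*queues));

        if (!queues)
            return -1;
        comm->norm_base = queues;
        queues += NORM_CMD_ENTRIES;     /* cursor past the first slice */
        comm->high_base = queues;
        return 0;
    }
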
/Linux-v5.15/Documentation/ABI/testing/

sysfs-class-net-queues
      1  What: /sys/class/<iface>/queues/rx-<queue>/rps_cpus
     11  What: /sys/class/<iface>/queues/rx-<queue>/rps_flow_cnt
     19  What: /sys/class/<iface>/queues/tx-<queue>/tx_timeout
     27  What: /sys/class/<iface>/queues/tx-<queue>/tx_maxrate
     35  What: /sys/class/<iface>/queues/tx-<queue>/xps_cpus
     45  What: /sys/class/<iface>/queues/tx-<queue>/xps_rxqs
     56  What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
     65  What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
     73  What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit
     82  What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
    [all …]

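These per-queue attributes are plain text files under sysfs, so they can be tuned from any language. A sketch that caps one TX queue's rate, assuming a hypothetical eth0 and an arbitrary 100 Mb/s value (tx_maxrate takes Mb/s, the runtime path lives under /sys/class/net/, and the write needs root):

    #include <stdio.h>

    int main(void)
    {
        /* Interface name and rate are assumptions for the sketch. */
        const char *path = "/sys/class/net/eth0/queues/tx-0/tx_maxrate";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror(path);
            return 1;
        }
        fprintf(f, "100\n");            /* cap the queue at 100 Mb/s */
        return fclose(f) == 0 ? 0 : 1;
    }
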
/Linux-v5.15/sound/virtio/

virtio_card.h
     51  struct virtio_snd_queue queues[VIRTIO_SND_VQ_MAX];  (member)
     70  return &snd->queues[VIRTIO_SND_VQ_CONTROL];  (in virtsnd_control_queue())
     76  return &snd->queues[VIRTIO_SND_VQ_EVENT];  (in virtsnd_event_queue())
     82  return &snd->queues[VIRTIO_SND_VQ_TX];  (in virtsnd_tx_queue())
     88  return &snd->queues[VIRTIO_SND_VQ_RX];  (in virtsnd_rx_queue())

/Linux-v5.15/drivers/nvme/target/

loop.c
     30  struct nvme_loop_queue *queues;  (member)
     71  return queue - queue->ctrl->queues;  (in nvme_loop_queue_idx())
    176  struct nvme_loop_queue *queue = &ctrl->queues[0];  (in nvme_loop_submit_async_event())
    198  iod->queue = &ctrl->queues[queue_idx];  (in nvme_loop_init_iod())
    222  struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];  (in nvme_loop_init_hctx())
    242  struct nvme_loop_queue *queue = &ctrl->queues[0];  (in nvme_loop_init_admin_hctx())
    266  if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))  (in nvme_loop_destroy_admin_queue())
    268  nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);  (in nvme_loop_destroy_admin_queue())
    289  kfree(ctrl->queues);  (in nvme_loop_free_ctrl())
    300  clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);  (in nvme_loop_destroy_io_queues())
    [all …]

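Line 71 shows a small but common trick: a queue never stores its own index; nvme_loop_queue_idx() recovers it by subtracting the array base from the queue's address. The other lines imply the layout: queues[0] is the admin queue, and I/O hctx i maps to queues[i + 1]. The trick in isolation, with stand-in types:

    #include <stddef.h>

    struct loop_queue;

    struct ctrl {
        struct loop_queue *queues;      /* queues[0] is the admin queue */
    };

    struct loop_queue {
        struct ctrl *ctrl;              /* back-pointer to the owner */
    };

    /* Recover a queue's index from its address alone: pointer
     * difference from the array base, as nvme_loop_queue_idx() does. */
    static ptrdiff_t queue_idx(const struct loop_queue *queue)
    {
        return queue - queue->ctrl->queues;
    }
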
/Linux-v5.15/Documentation/devicetree/bindings/soc/ti/

keystone-navigator-qmss.txt
      9  management of the packet queues. Packets are queued/de-queued by writing or
     32  -- managed-queues : the actual queues managed by each queue manager
     33  instance, specified as <"base queue #" "# of queues">.
     51  - qpend : pool of qpend(interruptible) queues
     52  - general-purpose : pool of general queues, primarily used
     53  as free descriptor queues or the
     54  transmit DMA queues.
     55  - accumulator : pool of queues on PDSP accumulator channel
     57  -- qrange : number of queues to use per queue range, specified as
     58  <"base queue #" "# of queues">.
    [all …]

/Linux-v5.15/Documentation/networking/device_drivers/ethernet/ti/

cpsw.rst
     26  - TX queues must be rated starting from txq0 that has highest priority
     28  - CBS shapers should be used with rated queues
     30  potential incoming rate, thus, rate of all incoming tx queues has
    150  // Add 4 tx queues, for interface Eth0, and 1 tx queue for Eth1
    156  // Check if num of queues is set correctly:
    172  // TX queues must be rated starting from 0, so set bws for tx0 and tx1
    175  // Leave last 2 tx queues not rated.
    176  $ echo 40 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
    177  $ echo 20 > /sys/class/net/eth0/queues/tx-1/tx_maxrate
    181  // Check maximum rate of tx (cpdma) queues:
    [all …]

/Linux-v5.15/tools/perf/util/

intel-bts.c
     46  struct auxtrace_queues queues;  (member)
    211  for (i = 0; i < bts->queues.nr_queues; i++) {  (in intel_bts_setup_queues())
    212  ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i],  (in intel_bts_setup_queues())
    222  if (bts->queues.new_data) {  (in intel_bts_update_queues())
    223  bts->queues.new_data = false;  (in intel_bts_update_queues())
    465  queue = &btsq->bts->queues.queue_array[btsq->queue_nr];  (in intel_bts_process_queue())
    539  struct auxtrace_queues *queues = &bts->queues;  (in intel_bts_process_tid_exit(), local)
    542  for (i = 0; i < queues->nr_queues; i++) {  (in intel_bts_process_tid_exit())
    543  struct auxtrace_queue *queue = &bts->queues.queue_array[i];  (in intel_bts_process_tid_exit())
    568  queue = &bts->queues.queue_array[queue_nr];  (in intel_bts_process_queues())
    [all …]

arm-spe.c
     41  struct auxtrace_queues queues;  (member)
    146  queue = &speq->spe->queues.queue_array[speq->queue_nr];  (in arm_spe_get_trace())
    552  for (i = 0; i < spe->queues.nr_queues; i++) {  (in arm_spe__setup_queues())
    553  ret = arm_spe__setup_queue(spe, &spe->queues.queue_array[i], i);  (in arm_spe__setup_queues())
    563  if (spe->queues.new_data) {  (in arm_spe__update_queues())
    564  spe->queues.new_data = false;  (in arm_spe__update_queues())
    631  queue = &spe->queues.queue_array[queue_nr];  (in arm_spe_process_queues())
    667  struct auxtrace_queues *queues = &spe->queues;  (in arm_spe_process_timeless_queues(), local)
    671  for (i = 0; i < queues->nr_queues; i++) {  (in arm_spe_process_timeless_queues())
    672  struct auxtrace_queue *queue = &spe->queues.queue_array[i];  (in arm_spe_process_timeless_queues())
    [all …]

s390-cpumsf.c
    169  struct auxtrace_queues queues;  (member)
    202  if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu)  (in s390_cpumcf_dumpctr())
    205  q = &sf->queues.queue_array[sample->cpu];  (in s390_cpumcf_dumpctr())
    700  queue = &sfq->sf->queues.queue_array[sfq->queue_nr];  (in s390_cpumsf_run_decoder())
    824  for (i = 0; i < sf->queues.nr_queues; i++) {  (in s390_cpumsf_setup_queues())
    825  ret = s390_cpumsf_setup_queue(sf, &sf->queues.queue_array[i],  (in s390_cpumsf_setup_queues())
    835  if (!sf->queues.new_data)  (in s390_cpumsf_update_queues())
    838  sf->queues.new_data = false;  (in s390_cpumsf_update_queues())
    859  queue = &sf->queues.queue_array[queue_nr];  (in s390_cpumsf_process_queues())
    984  err = auxtrace_queues__add_event(&sf->queues, session, event,  (in s390_cpumsf_process_auxtrace_event())
    [all …]

auxtrace.c
    211  int auxtrace_queues__init(struct auxtrace_queues *queues)  (in auxtrace_queues__init(), argument)
    213  queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;  (in auxtrace_queues__init())
    214  queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);  (in auxtrace_queues__init())
    215  if (!queues->queue_array)  (in auxtrace_queues__init())
    220  static int auxtrace_queues__grow(struct auxtrace_queues *queues,  (in auxtrace_queues__grow(), argument)
    223  unsigned int nr_queues = queues->nr_queues;  (in auxtrace_queues__grow())
    233  if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)  (in auxtrace_queues__grow())
    240  for (i = 0; i < queues->nr_queues; i++) {  (in auxtrace_queues__grow())
    241  list_splice_tail(&queues->queue_array[i].head,  (in auxtrace_queues__grow())
    243  queue_array[i].tid = queues->queue_array[i].tid;  (in auxtrace_queues__grow())
    [all …]

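All four perf files above share the auxtrace_queues container seen here: an array that starts at AUXTRACE_INIT_NR_QUEUES and grows by doubling, with line 233 guarding against a size that wrapped around. A compilable miniature of the grow step, where a plain memcpy stands in for the list_splice_tail() migration the real code does:

    #include <stdlib.h>
    #include <string.h>

    struct queue { void *priv; int tid; };

    struct queues {
        unsigned int nr_queues;
        struct queue *queue_array;
    };

    /* Double the capacity until new_nr fits, then move the old slots
     * over. Returns -1 on overflow or allocation failure. */
    static int queues_grow(struct queues *q, unsigned int new_nr)
    {
        unsigned int nr = q->nr_queues ? q->nr_queues : 1;
        struct queue *arr;

        while (nr && nr < new_nr)
            nr <<= 1;
        if (nr < q->nr_queues || nr < new_nr)
            return -1;                  /* size wrapped around */
        arr = calloc(nr, sizeof(*arr));
        if (!arr)
            return -1;
        if (q->queue_array)
            memcpy(arr, q->queue_array, q->nr_queues * sizeof(*arr));
        free(q->queue_array);
        q->queue_array = arr;
        q->nr_queues = nr;
        return 0;
    }
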
/Linux-v5.15/Documentation/arm/keystone/

knav-qmss.rst
     15  management of the packet queues. Packets are queued/de-queued by writing or
     24  knav qmss driver provides a set of APIs to drivers to open/close qmss queues,
     25  allocate descriptor pools, map the descriptors, push/pop to queues etc. For
     31  Accumulator QMSS queues using PDSP firmware
     34  queue or multiple contiguous queues. drivers/soc/ti/knav_qmss_acc.c is the
     37  1 or 32 queues per channel. More description on the firmware is available in
     56  Use of accumulated queues requires the firmware image to be present in the
     57  file system. The driver doesn't acc queues to the supported queue range if

/Linux-v5.15/drivers/target/

target_core_tmr.c
    127  flush_work(&dev->queues[i].sq.work);  (in core_tmr_abort_task())
    129  spin_lock_irqsave(&dev->queues[i].lock, flags);  (in core_tmr_abort_task())
    130  list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list,  (in core_tmr_abort_task())
    157  spin_unlock_irqrestore(&dev->queues[i].lock, flags);  (in core_tmr_abort_task())
    179  spin_unlock_irqrestore(&dev->queues[i].lock, flags);  (in core_tmr_abort_task())
    307  flush_work(&dev->queues[i].sq.work);  (in core_tmr_drain_state_list())
    309  spin_lock_irqsave(&dev->queues[i].lock, flags);  (in core_tmr_drain_state_list())
    310  list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,  (in core_tmr_drain_state_list())
    339  spin_unlock_irqrestore(&dev->queues[i].lock, flags);  (in core_tmr_drain_state_list())

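Both TMR paths iterate every per-CPU queue, take that queue's own lock, and walk its state list with the _safe variant so the current entry may be unlinked mid-walk. A userspace rendering of the pattern, with pthread mutexes standing in for spin_lock_irqsave() and a hand-rolled singly linked list standing in for list_for_each_entry_safe():

    #include <pthread.h>

    struct cmd { struct cmd *next; int aborted; };

    struct dev_queue {
        pthread_mutex_t lock;           /* stand-in for the spinlock */
        struct cmd *state_list;
    };

    /* Walk every queue under its own lock; grab `next` before
     * touching the current node so it may safely be unlinked. */
    static void mark_aborted(struct dev_queue *queues, int nr)
    {
        for (int i = 0; i < nr; i++) {
            pthread_mutex_lock(&queues[i].lock);
            for (struct cmd *c = queues[i].state_list, *n; c; c = n) {
                n = c->next;
                c->aborted = 1;
            }
            pthread_mutex_unlock(&queues[i].lock);
        }
    }
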
/Linux-v5.15/include/linux/

ptr_ring.h
    625  void ***queues;  (in ptr_ring_resize_multiple(), local)
    628  queues = kmalloc_array(nrings, sizeof(*queues), gfp);  (in ptr_ring_resize_multiple())
    629  if (!queues)  (in ptr_ring_resize_multiple())
    633  queues[i] = __ptr_ring_init_queue_alloc(size, gfp);  (in ptr_ring_resize_multiple())
    634  if (!queues[i])  (in ptr_ring_resize_multiple())
    641  queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],  (in ptr_ring_resize_multiple())
    648  kvfree(queues[i]);  (in ptr_ring_resize_multiple())
    650  kfree(queues);  (in ptr_ring_resize_multiple())
    656  kvfree(queues[i]);  (in ptr_ring_resize_multiple())
    658  kfree(queues);  (in ptr_ring_resize_multiple())

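ptr_ring_resize_multiple() resizes several rings atomically from the caller's point of view: it allocates every replacement queue before touching any ring, so a late allocation failure unwinds without leaving the rings half-resized. A sketch of that allocate-all-then-swap shape, minus the locking and content migration the real code performs:

    #include <stdlib.h>

    struct ring { void **queue; size_t size; };

    static int resize_multiple(struct ring **rings, size_t nrings,
                               size_t size)
    {
        void ***queues = malloc(nrings * sizeof(*queues));
        size_t i;

        if (!queues)
            return -1;
        for (i = 0; i < nrings; i++) {  /* allocate everything first */
            queues[i] = calloc(size, sizeof(void *));
            if (!queues[i])
                goto nomem;
        }
        for (i = 0; i < nrings; i++) {
            void **old = rings[i]->queue;
            rings[i]->queue = queues[i]; /* swap in the new queue */
            rings[i]->size = size;
            queues[i] = old;             /* stash old one for freeing */
        }
        for (i = 0; i < nrings; i++)
            free(queues[i]);
        free(queues);
        return 0;

    nomem:
        while (i--)                      /* unwind partial allocation */
            free(queues[i]);
        free(queues);
        return -1;
    }

Reusing queues[] to hold the displaced old queues is the same economy the kernel version shows on lines 641-648.
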
/Linux-v5.15/drivers/vdpa/virtio_pci/

vp_vdpa.c
     40  int queues;  (member)
     85  for (i = 0; i < vp_vdpa->queues; i++) {  (in vp_vdpa_free_irq())
    131  int queues = vp_vdpa->queues;  (in vp_vdpa_request_irq(), local)
    132  int vectors = queues + 1;  (in vp_vdpa_request_irq())
    144  for (i = 0; i < queues; i++) {  (in vp_vdpa_request_irq())
    163  irq = pci_irq_vector(pdev, queues);  (in vp_vdpa_request_irq())
    171  vp_modern_config_vector(mdev, queues);  (in vp_vdpa_request_irq())
    468  vp_vdpa->queues = vp_modern_get_num_queues(mdev);  (in vp_vdpa_probe())
    477  vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,  (in vp_vdpa_probe())
    486  for (i = 0; i < vp_vdpa->queues; i++) {  (in vp_vdpa_probe())
    [all …]

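Lines 131-171 encode the MSI-X layout: virtqueue i gets vector i, and one trailing vector (index equal to queues) is reserved for config-change interrupts, hence vectors = queues + 1. Printed out for a hypothetical four-queue device:

    #include <stdio.h>

    int main(void)
    {
        int queues = 4;                 /* assumed queue count */
        int vectors = queues + 1;       /* one extra for config */

        for (int i = 0; i < queues; i++)
            printf("vq %d     -> vector %d\n", i, i);
        printf("config   -> vector %d of %d\n", queues, vectors);
        return 0;
    }
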
/Linux-v5.15/Documentation/block/

blk-mq.rst
     37  spawns multiple queues with individual entry points local to the CPU, removing
     49  blk-mq has two group of queues: software staging queues and hardware dispatch
     50  queues. When the request arrives at the block layer, it will try the shortest
     56  Then, after the requests are processed by software queues, they will be placed
     62  Software staging queues
     65  The block IO subsystem adds requests in the software staging queues
     71  the number of queues is defined by a per-CPU or per-node basis.
     93  requests from different queues, otherwise there would be cache trashing and a
     99  queue (a.k.a. run the hardware queue), the software queues mapped to that
    102  Hardware dispatch queues
    [all …]

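blk-mq's two tiers reduce to a mapping problem: many per-CPU staging queues must be folded onto fewer hardware dispatch queues. A deliberately naive sketch of such a fold; blk-mq builds real CPU-to-hctx maps rather than using modulo:

    /* Fold per-CPU staging queues onto nr_hw_queues dispatch queues.
     * Illustration only, not blk-mq's actual mapping. */
    static unsigned int cpu_to_hwq(unsigned int cpu,
                                   unsigned int nr_hw_queues)
    {
        return cpu % nr_hw_queues;
    }

Keeping the map stable per CPU is what avoids the cross-queue cache contention the doc warns about on line 93.
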
/Linux-v5.15/drivers/staging/qlge/

TODO
     13  * rename "rx" queues to "completion" queues. Calling tx completion queues "rx
     14  queues" is confusing.
     18  frames, resets the link, device and driver buffer queues become

/Linux-v5.15/Documentation/networking/

multiqueue.rst
     18  the subqueue memory, as well as netdev configuration of where the queues
     21  The base driver will also need to manage the queues as it does the global
     33  A new round-robin qdisc, sch_multiq also supports multiple hardware queues. The
     35  bands and queues based on the value in skb->queue_mapping. Use this field in
     42  On qdisc load, the number of bands is based on the number of queues on the
     56  The qdisc will allocate the number of bands to equal the number of queues that
     58  queues, the band mapping would look like::

scaling.rst
     27  Contemporary NICs support multiple receive and transmit descriptor queues
     29  queues to distribute processing among CPUs. The NIC distributes packets by
     47  Some advanced NICs allow steering packets to queues based on
     57  module parameter for specifying the number of hardware queues to
     60  for each CPU if the device supports enough queues, or otherwise at least
     66  default mapping is to distribute the queues evenly in the table, but the
     69  indirection table could be done to give different queues different
     80  of queues to IRQs can be determined from /proc/interrupts. By default,
     95  is to allocate as many queues as there are CPUs in the system (or the
     97  is likely the one with the smallest number of receive queues where no
    [all …]

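The RSS mechanism scaling.rst describes comes down to a two-step lookup: hash the flow, then index an indirection table with the hash's low bits to pick a receive queue. In C, assuming a typical 128-entry power-of-two table (real NICs perform this lookup in hardware):

    #include <stdint.h>

    #define RSS_TABLE_SIZE 128          /* assumed table size */

    /* Mask the flow hash's low bits, index the indirection table,
     * return an RX queue number. Skewing the table entries is how
     * queues get different proportions of traffic (line 66-69). */
    static unsigned int rss_pick_queue(uint32_t hash,
                                       const uint8_t table[RSS_TABLE_SIZE])
    {
        return table[hash & (RSS_TABLE_SIZE - 1)];
    }
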
/Linux-v5.15/Documentation/devicetree/bindings/dma/

fsl-qdma.txt
     22  - fsl,dma-queues: Should contain number of queues supported.
     28  based on queues
     52  fsl,dma-queues = <2>;

/Linux-v5.15/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/

ethernet-driver.rst
     25  - queues, channels
     32  hardware resources, like queues, do not have a corresponding MC object and
     99  queues ---------------------- | | Buffer pool |  (in an ASCII block diagram)
    109  Frames are transmitted and received through hardware frame queues, which can be
    111  enqueues TX frames on egress queues and after transmission is complete a TX
    114  When frames are available on ingress queues, a data availability notification
    116  queues in the same channel have available frames, only one notification is sent.
    119  Each network interface can have multiple Rx, Tx and confirmation queues affined

/Linux-v5.15/drivers/net/ethernet/netronome/nfp/

nfp_net_debugfs.c
    150  struct dentry *queues, *tx, *rx, *xdp;  (in nfp_net_debugfs_vnic_add(), local)
    164  queues = debugfs_create_dir("queue", nn->debugfs_dir);  (in nfp_net_debugfs_vnic_add())
    166  rx = debugfs_create_dir("rx", queues);  (in nfp_net_debugfs_vnic_add())
    167  tx = debugfs_create_dir("tx", queues);  (in nfp_net_debugfs_vnic_add())
    168  xdp = debugfs_create_dir("xdp", queues);  (in nfp_net_debugfs_vnic_add())

/Linux-v5.15/Documentation/devicetree/bindings/mfd/

fsl-imx25-tsadc.txt
      3  This device combines two general purpose conversion queues one used for general
     15  conversion queues.
     20  This device includes two conversion queues which can be added as subnodes.