
Searched refs:queues (Results 1 – 25 of 270) sorted by relevance


/Linux-v4.19/net/sched/
sch_multiq.c
36 struct Qdisc **queues; member
65 return q->queues[0]; in multiq_classify()
67 return q->queues[band]; in multiq_classify()
116 qdisc = q->queues[q->curband]; in multiq_dequeue()
148 qdisc = q->queues[curband]; in multiq_peek()
165 qdisc_reset(q->queues[band]); in multiq_reset()
178 qdisc_destroy(q->queues[band]); in multiq_destroy()
180 kfree(q->queues); in multiq_destroy()
202 if (q->queues[i] != &noop_qdisc) { in multiq_tune()
203 struct Qdisc *child = q->queues[i]; in multiq_tune()
[all …]
sch_prio.c
30 struct Qdisc *queues[TCQ_PRIO_BANDS]; member
61 return q->queues[q->prio2band[band & TC_PRIO_MAX]]; in prio_classify()
67 return q->queues[q->prio2band[0]]; in prio_classify()
69 return q->queues[band]; in prio_classify()
106 struct Qdisc *qdisc = q->queues[prio]; in prio_peek()
120 struct Qdisc *qdisc = q->queues[prio]; in prio_dequeue()
140 qdisc_reset(q->queues[prio]); in prio_reset()
178 qdisc_destroy(q->queues[prio]); in prio_destroy()
185 struct Qdisc *queues[TCQ_PRIO_BANDS]; in prio_tune() local
203 queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, in prio_tune()
[all …]
/Linux-v4.19/drivers/scsi/aacraid/
comminit.c
388 struct aac_entry * queues; in aac_comm_init() local
390 struct aac_queue_block * comm = dev->queues; in aac_comm_init()
409 queues = (struct aac_entry *)(((ulong)headers) + hdrsize); in aac_comm_init()
412 comm->queue[HostNormCmdQueue].base = queues; in aac_comm_init()
414 queues += HOST_NORM_CMD_ENTRIES; in aac_comm_init()
418 comm->queue[HostHighCmdQueue].base = queues; in aac_comm_init()
421 queues += HOST_HIGH_CMD_ENTRIES; in aac_comm_init()
425 comm->queue[AdapNormCmdQueue].base = queues; in aac_comm_init()
428 queues += ADAP_NORM_CMD_ENTRIES; in aac_comm_init()
432 comm->queue[AdapHighCmdQueue].base = queues; in aac_comm_init()
[all …]
/Linux-v4.19/tools/perf/util/
arm-spe.c
31 struct auxtrace_queues queues; member
123 err = auxtrace_queues__add_event(&spe->queues, session, event, in arm_spe_process_auxtrace_event()
159 struct auxtrace_queues *queues = &spe->queues; in arm_spe_free_events() local
162 for (i = 0; i < queues->nr_queues; i++) { in arm_spe_free_events()
163 arm_spe_free_queue(queues->queue_array[i].priv); in arm_spe_free_events()
164 queues->queue_array[i].priv = NULL; in arm_spe_free_events()
166 auxtrace_queues__free(queues); in arm_spe_free_events()
208 err = auxtrace_queues__init(&spe->queues); in arm_spe_process_auxtrace_info()
s390-cpumsf.c
165 struct auxtrace_queues queues; member
578 queue = &sfq->sf->queues.queue_array[sfq->queue_nr]; in s390_cpumsf_run_decoder()
672 for (i = 0; i < sf->queues.nr_queues; i++) { in s390_cpumsf_setup_queues()
673 ret = s390_cpumsf_setup_queue(sf, &sf->queues.queue_array[i], in s390_cpumsf_setup_queues()
683 if (!sf->queues.new_data) in s390_cpumsf_update_queues()
686 sf->queues.new_data = false; in s390_cpumsf_update_queues()
707 queue = &sf->queues.queue_array[queue_nr]; in s390_cpumsf_process_queues()
819 err = auxtrace_queues__add_event(&sf->queues, session, event, in s390_cpumsf_process_auxtrace_event()
850 struct auxtrace_queues *queues = &sf->queues; in s390_cpumsf_free_queues() local
853 for (i = 0; i < queues->nr_queues; i++) in s390_cpumsf_free_queues()
[all …]
auxtrace.c
174 int auxtrace_queues__init(struct auxtrace_queues *queues) in auxtrace_queues__init() argument
176 queues->nr_queues = AUXTRACE_INIT_NR_QUEUES; in auxtrace_queues__init()
177 queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues); in auxtrace_queues__init()
178 if (!queues->queue_array) in auxtrace_queues__init()
183 static int auxtrace_queues__grow(struct auxtrace_queues *queues, in auxtrace_queues__grow() argument
186 unsigned int nr_queues = queues->nr_queues; in auxtrace_queues__grow()
196 if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues) in auxtrace_queues__grow()
203 for (i = 0; i < queues->nr_queues; i++) { in auxtrace_queues__grow()
204 list_splice_tail(&queues->queue_array[i].head, in auxtrace_queues__grow()
206 queue_array[i].tid = queues->queue_array[i].tid; in auxtrace_queues__grow()
[all …]
intel-bts.c
53 struct auxtrace_queues queues; member
218 for (i = 0; i < bts->queues.nr_queues; i++) { in intel_bts_setup_queues()
219 ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i], in intel_bts_setup_queues()
229 if (bts->queues.new_data) { in intel_bts_update_queues()
230 bts->queues.new_data = false; in intel_bts_update_queues()
481 queue = &btsq->bts->queues.queue_array[btsq->queue_nr]; in intel_bts_process_queue()
555 struct auxtrace_queues *queues = &bts->queues; in intel_bts_process_tid_exit() local
558 for (i = 0; i < queues->nr_queues; i++) { in intel_bts_process_tid_exit()
559 struct auxtrace_queue *queue = &bts->queues.queue_array[i]; in intel_bts_process_tid_exit()
584 queue = &bts->queues.queue_array[queue_nr]; in intel_bts_process_queues()
[all …]
cs-etm.c
44 struct auxtrace_queues queues; member
213 struct auxtrace_queues *queues = &aux->queues; in cs_etm__free_events() local
215 for (i = 0; i < queues->nr_queues; i++) { in cs_etm__free_events()
216 cs_etm__free_queue(queues->queue_array[i].priv); in cs_etm__free_events()
217 queues->queue_array[i].priv = NULL; in cs_etm__free_events()
220 auxtrace_queues__free(queues); in cs_etm__free_events()
428 for (i = 0; i < etm->queues.nr_queues; i++) { in cs_etm__setup_queues()
429 ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i); in cs_etm__setup_queues()
439 if (etm->queues.new_data) { in cs_etm__update_queues()
440 etm->queues.new_data = false; in cs_etm__update_queues()
[all …]
/Linux-v4.19/Documentation/ABI/testing/
sysfs-class-net-queues
1 What: /sys/class/<iface>/queues/rx-<queue>/rps_cpus
11 What: /sys/class/<iface>/queues/rx-<queue>/rps_flow_cnt
19 What: /sys/class/<iface>/queues/tx-<queue>/tx_timeout
27 What: /sys/class/<iface>/queues/tx-<queue>/tx_maxrate
35 What: /sys/class/<iface>/queues/tx-<queue>/xps_cpus
45 What: /sys/class/<iface>/queues/tx-<queue>/xps_rxqs
56 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
65 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
73 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit
82 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
[all …]
/Linux-v4.19/Documentation/devicetree/bindings/soc/ti/
keystone-navigator-qmss.txt
9 management of the packet queues. Packets are queued/de-queued by writing or
32 -- managed-queues : the actual queues managed by each queue manager
33 instance, specified as <"base queue #" "# of queues">.
51 - qpend : pool of qpend(interruptible) queues
52 - general-purpose : pool of general queues, primarily used
53 as free descriptor queues or the
54 transmit DMA queues.
55 - accumulator : pool of queues on PDSP accumulator channel
57 -- qrange : number of queues to use per queue range, specified as
58 <"base queue #" "# of queues">.
[all …]
/Linux-v4.19/drivers/nvme/target/
loop.c
38 struct nvme_loop_queue *queues; member
80 return queue - queue->ctrl->queues; in nvme_loop_queue_idx()
197 struct nvme_loop_queue *queue = &ctrl->queues[0]; in nvme_loop_submit_async_event()
219 iod->queue = &ctrl->queues[queue_idx]; in nvme_loop_init_iod()
239 struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_loop_init_hctx()
251 struct nvme_loop_queue *queue = &ctrl->queues[0]; in nvme_loop_init_admin_hctx()
277 clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); in nvme_loop_destroy_admin_queue()
278 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); in nvme_loop_destroy_admin_queue()
298 kfree(ctrl->queues); in nvme_loop_free_ctrl()
309 clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); in nvme_loop_destroy_io_queues()
[all …]
/Linux-v4.19/Documentation/networking/
ti-cpsw.txt
21 - TX queues must be rated starting from txq0 that has highest priority
23 - CBS shapers should be used with rated queues
25 potential incoming rate, thus, rate of all incoming tx queues has
139 // Add 4 tx queues, for interface Eth0, and 1 tx queue for Eth1
144 // Check if num of queues is set correctly:
159 // TX queues must be rated starting from 0, so set bws for tx0 and tx1
162 // Leave last 2 tx queues not rated.
163 $ echo 40 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
164 $ echo 20 > /sys/class/net/eth0/queues/tx-1/tx_maxrate
167 // Check maximum rate of tx (cpdma) queues:
[all …]
multiqueue.txt
18 the subqueue memory, as well as netdev configuration of where the queues
21 The base driver will also need to manage the queues as it does the global
34 A new round-robin qdisc, sch_multiq also supports multiple hardware queues. The
36 bands and queues based on the value in skb->queue_mapping. Use this field in
43 On qdisc load, the number of bands is based on the number of queues on the
57 The qdisc will allocate the number of bands to equal the number of queues that
59 queues, the band mapping would look like:
scaling.txt
23 Contemporary NICs support multiple receive and transmit descriptor queues
25 queues to distribute processing among CPUs. The NIC distributes packets by
43 Some advanced NICs allow steering packets to queues based on
51 module parameter for specifying the number of hardware queues to
54 for each CPU if the device supports enough queues, or otherwise at least
60 default mapping is to distribute the queues evenly in the table, but the
63 indirection table could be done to give different queues different
72 of queues to IRQs can be determined from /proc/interrupts. By default,
85 is to allocate as many queues as there are CPUs in the system (or the
87 is likely the one with the smallest number of receive queues where no
[all …]
/Linux-v4.19/Documentation/arm/keystone/
knav-qmss.txt
13 management of the packet queues. Packets are queued/de-queued by writing or
22 knav qmss driver provides a set of APIs to drivers to open/close qmss queues,
23 allocate descriptor pools, map the descriptors, push/pop to queues etc. For
29 Accumulator QMSS queues using PDSP firmware
32 queue or multiple contiguous queues. drivers/soc/ti/knav_qmss_acc.c is the
35 1 or 32 queues per channel. More description on the firmware is available in
52 Use of accumulated queues requires the firmware image to be present in the
53 file system. The driver doesn't acc queues to the supported queue range if
/Linux-v4.19/include/linux/
ptr_ring.h
627 void ***queues; in ptr_ring_resize_multiple() local
630 queues = kmalloc_array(nrings, sizeof(*queues), gfp); in ptr_ring_resize_multiple()
631 if (!queues) in ptr_ring_resize_multiple()
635 queues[i] = __ptr_ring_init_queue_alloc(size, gfp); in ptr_ring_resize_multiple()
636 if (!queues[i]) in ptr_ring_resize_multiple()
643 queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], in ptr_ring_resize_multiple()
650 kvfree(queues[i]); in ptr_ring_resize_multiple()
652 kfree(queues); in ptr_ring_resize_multiple()
658 kvfree(queues[i]); in ptr_ring_resize_multiple()
660 kfree(queues); in ptr_ring_resize_multiple()
/Linux-v4.19/Documentation/block/
cfq-iosched.txt
17 This specifies how long CFQ should idle for next request on certain cfq queues
22 queues/service trees. This can be very helpful on highly seeky media like
26 Setting slice_idle to 0 will remove all the idling on queues/service tree
76 queues in the group but happens overall on the group and thus still keeps the
78 Not idling on individual queues in the group will dispatch requests from
79 multiple queues in the group at the same time and achieve higher throughput
123 When a queue is selected for execution, the queues IO requests are only
208 it would be better to dispatch multiple requests from multiple cfq queues in
226 dispatch requests from other cfq queues even if requests are pending there.
235 CFQ has following service trees and various queues are put on these trees.
[all …]
/Linux-v4.19/drivers/gpu/drm/msm/adreno/
a6xx_hfi.c
137 struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE]; in a6xx_hfi_task()
165 struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE]; in a6xx_hfi_send_msg()
364 for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) { in a6xx_hfi_stop()
365 struct a6xx_hfi_queue *queue = &gmu->queues[i]; in a6xx_hfi_stop()
415 table_size += (ARRAY_SIZE(gmu->queues) * in a6xx_hfi_init()
423 table->num_queues = ARRAY_SIZE(gmu->queues); in a6xx_hfi_init()
424 table->active_queues = ARRAY_SIZE(gmu->queues); in a6xx_hfi_init()
428 a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset, in a6xx_hfi_init()
433 a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset, in a6xx_hfi_init()
/Linux-v4.19/drivers/net/ethernet/netronome/nfp/
nfp_net_debugfs.c
206 struct dentry *queues, *tx, *rx, *xdp; in nfp_net_debugfs_vnic_add() local
222 queues = debugfs_create_dir("queue", nn->debugfs_dir); in nfp_net_debugfs_vnic_add()
223 if (IS_ERR_OR_NULL(queues)) in nfp_net_debugfs_vnic_add()
226 rx = debugfs_create_dir("rx", queues); in nfp_net_debugfs_vnic_add()
227 tx = debugfs_create_dir("tx", queues); in nfp_net_debugfs_vnic_add()
228 xdp = debugfs_create_dir("xdp", queues); in nfp_net_debugfs_vnic_add()
/Linux-v4.19/Documentation/devicetree/bindings/net/
fsl-fec.txt
26 - fsl,num-tx-queues : The property is valid for enet-avb IP, which supports
27 hw multi queues. Should specify the tx queue number, otherwise set tx queue
29 - fsl,num-rx-queues : The property is valid for enet-avb IP, which supports
30 hw multi queues. Should specify the rx queue number, otherwise set rx queue
46 tx/rx queues 1 and 2. "int0" will be used for queue 0 and ENET_MII interrupts.
47 For imx6sx, "int0" handles all 3 queues and ENET_MII. "pps" is for the pulse
stmmac.txt
78 configure the multiple RX queues:
79 - snps,rx-queues-to-use: number of RX queues to be used in the driver
96 configure the multiple TX queues:
97 - snps,tx-queues-to-use: number of TX queues to be used in the driver
124 mtl_rx_setup: rx-queues-config {
125 snps,rx-queues-to-use = <1>;
134 mtl_tx_setup: tx-queues-config {
135 snps,tx-queues-to-use = <2>;
brcm,systemport.txt
10 interrupts, and the second cell should be for the transmit queues. An
21 - systemport,num-txq: number of HW transmit queues, an integer
22 - systemport,num-rxq: number of HW receive queues, an integer
/Linux-v4.19/drivers/staging/fsl-dpaa2/ethernet/
ethernet-driver.rst
25 - queues, channels
32 hardware resources, like queues, do not have a corresponding MC object and
98 queues ---------------------- | | Buffer pool |
108 Frames are transmitted and received through hardware frame queues, which can be
110 enqueues TX frames on egress queues and after transmission is complete a TX
113 When frames are available on ingress queues, a data availability notification
115 queues in the same channel have available frames, only one notification is sent.
118 Each network interface can have multiple Rx, Tx and confirmation queues affined
/Linux-v4.19/Documentation/devicetree/bindings/mfd/
fsl-imx25-tsadc.txt
3 This device combines two general purpose conversion queues one used for general
15 conversion queues.
20 This device includes two conversion queues which can be added as subnodes.
/Linux-v4.19/drivers/net/xen-netback/
interface.c
194 queue = &vif->queues[index]; in xenvif_start_xmit()
246 queue = &vif->queues[index]; in xenvif_get_stats()
270 queue = &vif->queues[queue_index]; in xenvif_up()
286 queue = &vif->queues[queue_index]; in xenvif_down()
399 void *vif_stats = &vif->queues[queue_index].stats; in xenvif_get_ethtool_stats()
475 vif->queues = NULL; in xenvif_alloc()
740 queue = &vif->queues[queue_index]; in xenvif_disconnect_data()
797 struct xenvif_queue *queues = vif->queues; in xenvif_free() local
805 xenvif_deinit_queue(&queues[queue_index]); in xenvif_free()
806 vfree(queues); in xenvif_free()
