
Searched refs:rings (Results 1 – 25 of 69) sorted by relevance


/Linux-v5.4/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
flowring.c 142 if (flow->rings[i] == NULL) in brcmf_flowring_create()
160 flow->rings[i] = ring; in brcmf_flowring_create()
172 ring = flow->rings[flowid]; in brcmf_flowring_tid()
192 ring = flow->rings[flowid]; in brcmf_flowring_block()
201 if ((flow->rings[i]) && (i != flowid)) { in brcmf_flowring_block()
202 ring = flow->rings[i]; in brcmf_flowring_block()
212 flow->rings[flowid]->blocked = blocked; in brcmf_flowring_block()
236 ring = flow->rings[flowid]; in brcmf_flowring_delete()
247 flow->rings[flowid] = NULL; in brcmf_flowring_delete()
264 ring = flow->rings[flowid]; in brcmf_flowring_enqueue()
[all …]
flowring.h 41 struct brcmf_flowring_ring **rings; member
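
The brcmfmac hits above show a common driver pattern: a fixed table of ring pointers in which a NULL slot marks a free flow ID, scanned on create and cleared on delete. A minimal userspace sketch of that pattern; the names (ring_table, ring_create, ring_delete) are hypothetical, not the driver's API:

#include <stdlib.h>

#define MAX_RINGS 256

struct ring {
    int blocked;
};

struct ring_table {
    struct ring *rings[MAX_RINGS];
};

/* Find a free (NULL) slot, allocate a ring there, return its id or -1,
 * mirroring the scan in brcmf_flowring_create(). */
static int ring_create(struct ring_table *tbl)
{
    for (int i = 0; i < MAX_RINGS; i++) {
        if (tbl->rings[i] == NULL) {
            tbl->rings[i] = calloc(1, sizeof(struct ring));
            return tbl->rings[i] ? i : -1;
        }
    }
    return -1; /* table full */
}

/* Free a ring and mark its slot reusable, as brcmf_flowring_delete()
 * does by storing NULL back into the table. */
static void ring_delete(struct ring_table *tbl, int id)
{
    free(tbl->rings[id]);
    tbl->rings[id] = NULL;
}
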
/Linux-v5.4/net/9p/
trans_xen.c 81 struct xen_9pfs_dataring *rings; member
158 ring = &priv->rings[num]; in p9_xen_request()
293 if (!priv->rings[i].intf) in xen_9pfs_front_free()
295 if (priv->rings[i].irq > 0) in xen_9pfs_front_free()
296 unbind_from_irqhandler(priv->rings[i].irq, priv->dev); in xen_9pfs_front_free()
297 if (priv->rings[i].data.in) { in xen_9pfs_front_free()
301 ref = priv->rings[i].intf->ref[j]; in xen_9pfs_front_free()
304 free_pages((unsigned long)priv->rings[i].data.in, in xen_9pfs_front_free()
308 gnttab_end_foreign_access(priv->rings[i].ref, 0, 0); in xen_9pfs_front_free()
309 free_page((unsigned long)priv->rings[i].intf); in xen_9pfs_front_free()
[all …]
/Linux-v5.4/tools/testing/selftests/net/
psock_fanout.c 221 static int sock_fanout_read(int fds[], char *rings[], const int expect[]) in sock_fanout_read() argument
225 ret[0] = sock_fanout_read_ring(fds[0], rings[0]); in sock_fanout_read()
226 ret[1] = sock_fanout_read_ring(fds[1], rings[1]); in sock_fanout_read()
348 char *rings[2]; in test_datapath() local
366 rings[0] = sock_fanout_open_ring(fds[0]); in test_datapath()
367 rings[1] = sock_fanout_open_ring(fds[1]); in test_datapath()
370 sock_fanout_read(fds, rings, expect0); in test_datapath()
375 ret = sock_fanout_read(fds, rings, expect1); in test_datapath()
380 ret |= sock_fanout_read(fds, rings, expect2); in test_datapath()
382 if (munmap(rings[1], RING_NUM_FRAMES * getpagesize()) || in test_datapath()
[all …]
/Linux-v5.4/fs/
io_uring.c 225 struct io_rings *rings; member
471 struct io_rings *rings = ctx->rings; in __io_commit_cqring() local
473 if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) { in __io_commit_cqring()
475 smp_store_release(&rings->cq.tail, ctx->cached_cq_tail); in __io_commit_cqring()
546 struct io_rings *rings = ctx->rings; in io_get_cqring() local
555 if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries) in io_get_cqring()
559 return &rings->cqes[tail & ctx->cq_mask]; in io_get_cqring()
578 WRITE_ONCE(ctx->rings->cq_overflow, in io_cqring_fill_event()
738 static unsigned io_cqring_events(struct io_rings *rings) in io_cqring_events() argument
742 return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head); in io_cqring_events()
[all …]
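
The io_uring hits illustrate the classic free-running head/tail scheme for a power-of-two ring: occupancy is tail - head (wraparound-safe with unsigned arithmetic), the ring is full when that difference equals the entry count, and a slot is addressed by masking the tail. A sketch of the arithmetic, with plain loads and stores standing in for the kernel's READ_ONCE()/smp_store_release(); names are illustrative, not io_uring's:

#include <stdint.h>
#include <stddef.h>

#define RING_ENTRIES 128u            /* must be a power of two */
#define RING_MASK    (RING_ENTRIES - 1)

struct cq_ring {
    uint32_t head;                   /* consumer advances this */
    uint32_t tail;                   /* producer advances this */
    uint64_t entries[RING_ENTRIES];
};

/* Number of completions waiting to be consumed, cf. io_cqring_events(). */
static uint32_t cq_events(const struct cq_ring *r)
{
    return r->tail - r->head;        /* well-defined even after wraparound */
}

/* Reserve the next slot, or NULL when full, cf. io_get_cqring(). */
static uint64_t *cq_get_slot(struct cq_ring *r)
{
    if (r->tail - r->head == RING_ENTRIES)
        return NULL;                 /* ring full */
    return &r->entries[r->tail & RING_MASK];
}

/* Publish the new tail after filling the slot, cf. __io_commit_cqring();
 * the kernel uses smp_store_release() so the consumer sees the entry
 * before the new tail. */
static void cq_commit(struct cq_ring *r)
{
    r->tail++;
}
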
/Linux-v5.4/drivers/gpu/drm/amd/amdgpu/
amdgpu_ctx.c 124 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; in amdgpu_ctx_init() local
131 rings[0] = &adev->gfx.gfx_ring[0]; in amdgpu_ctx_init()
136 rings[j] = &adev->gfx.compute_ring[j]; in amdgpu_ctx_init()
141 rings[j] = &adev->sdma.instance[j].ring; in amdgpu_ctx_init()
145 rings[0] = &adev->uvd.inst[0].ring; in amdgpu_ctx_init()
149 rings[0] = &adev->vce.ring[0]; in amdgpu_ctx_init()
153 rings[0] = &adev->uvd.inst[0].ring_enc[0]; in amdgpu_ctx_init()
160 rings[num_rings++] = &adev->vcn.inst[j].ring_dec; in amdgpu_ctx_init()
168 rings[num_rings++] = &adev->vcn.inst[j].ring_enc[k]; in amdgpu_ctx_init()
175 rings[num_rings++] = &adev->vcn.inst[j].ring_jpeg; in amdgpu_ctx_init()
[all …]
amdgpu_fence.c 533 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_fini()
567 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_suspend()
601 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_resume()
705 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_debugfs_fence_info()
amdgpu_ring.c 261 adev->rings[ring->idx] = ring; in amdgpu_ring_init()
357 if (!(ring->adev) || !(ring->adev->rings[ring->idx])) in amdgpu_ring_fini()
376 ring->adev->rings[ring->idx] = NULL; in amdgpu_ring_fini()
amdgpu_test.c 49 if (adev->rings[i]) in amdgpu_do_test_moves()
50 n -= adev->rings[i]->ring_size; in amdgpu_do_test_moves()
/Linux-v5.4/Documentation/networking/
af_xdp.rst 24 syscall. Associated with each XSK are two rings: the RX ring and the
26 packets on the TX ring. These rings are registered and sized with the
28 to have at least one of these rings for each socket. An RX or TX
37 one of the rings references a frame by referencing its addr. The addr
42 UMEM also has two rings: the FILL ring and the COMPLETION ring. The
50 TX ring. In summary, the RX and FILL rings are used for the RX path
51 and the TX and COMPLETION rings are used for the TX path.
59 corresponding two rings, sets the XDP_SHARED_UMEM flag in the bind
65 process has to create its own socket with associated RX and TX rings,
67 reason that there is only one set of FILL and COMPLETION rings per
[all …]
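
Putting the af_xdp.rst excerpts together: each XSK carries an RX and a TX ring, while its UMEM carries a FILL and a COMPLETION ring, all registered and sized with setsockopt(). A hedged setup sketch using the uapi constants from <linux/if_xdp.h>; error handling is elided, sizes are illustrative, and the fallback defines are only for older libc headers:

#include <sys/socket.h>
#include <sys/mman.h>
#include <linux/if_xdp.h>
#include <stddef.h>

#ifndef AF_XDP
#define AF_XDP 44
#endif
#ifndef SOL_XDP
#define SOL_XDP 283
#endif

int xsk_setup(void)
{
    int fd = socket(AF_XDP, SOCK_RAW, 0);

    /* UMEM: the memory area the frames live in; descriptors in the
     * rings reference frames by their addr within this area. */
    size_t umem_len = 4096 * 1024;
    void *umem = mmap(NULL, umem_len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    struct xdp_umem_reg reg = {
        .addr = (unsigned long)umem,
        .len = umem_len,
        .chunk_size = 4096,
        .headroom = 0,
    };
    setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &reg, sizeof(reg));

    /* RX/TX rings belong to the socket; FILL/COMPLETION to the UMEM.
     * RX + FILL serve the RX path, TX + COMPLETION the TX path. */
    int ndesc = 1024;
    setsockopt(fd, SOL_XDP, XDP_RX_RING, &ndesc, sizeof(ndesc));
    setsockopt(fd, SOL_XDP, XDP_TX_RING, &ndesc, sizeof(ndesc));
    setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ndesc, sizeof(ndesc));
    setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ndesc, sizeof(ndesc));

    return fd; /* rings are then mmap()ed and the socket bind()ed */
}
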
/Linux-v5.4/drivers/block/xen-blkback/
xenbus.c 84 if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev) in xen_update_blkif_status()
110 ring = &blkif->rings[i]; in xen_update_blkif_status()
124 ring = &blkif->rings[i]; in xen_update_blkif_status()
134 blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring), in xen_blkif_alloc_rings()
136 if (!blkif->rings) in xen_blkif_alloc_rings()
140 struct xen_blkif_ring *ring = &blkif->rings[r]; in xen_blkif_alloc_rings()
243 struct xen_blkif_ring *ring = &blkif->rings[r]; in xen_blkif_disconnect()
307 kfree(blkif->rings); in xen_blkif_disconnect()
308 blkif->rings = NULL; in xen_blkif_disconnect()
351 if (!blkif->rings) \
[all …]
/Linux-v5.4/include/linux/
ptr_ring.h 618 static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, in ptr_ring_resize_multiple() argument
638 spin_lock_irqsave(&(rings[i])->consumer_lock, flags); in ptr_ring_resize_multiple()
639 spin_lock(&(rings[i])->producer_lock); in ptr_ring_resize_multiple()
640 queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], in ptr_ring_resize_multiple()
642 spin_unlock(&(rings[i])->producer_lock); in ptr_ring_resize_multiple()
643 spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags); in ptr_ring_resize_multiple()
skb_array.h 201 static inline int skb_array_resize_multiple(struct skb_array **rings, in skb_array_resize_multiple() argument
206 return ptr_ring_resize_multiple((struct ptr_ring **)rings, in skb_array_resize_multiple()
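
The ptr_ring_resize_multiple() hits show the lock ordering used when swapping a ring's backing queue: take the consumer lock first, then the producer lock, swap, and release in reverse order. A userspace analog with pthread mutexes standing in for the kernel spinlocks; the xring type is a stand-in, not the ptr_ring API:

#include <pthread.h>

struct xring {
    pthread_mutex_t consumer_lock;
    pthread_mutex_t producer_lock;
    void **queue;
};

/* Swap in a new backing queue; return the old one so the caller can
 * free it outside the locks, as __ptr_ring_swap_queue()'s callers do. */
static void **xring_swap_queue(struct xring *r, void **new_queue)
{
    pthread_mutex_lock(&r->consumer_lock);  /* consumer first... */
    pthread_mutex_lock(&r->producer_lock);  /* ...then producer */

    void **old = r->queue;
    r->queue = new_queue;

    pthread_mutex_unlock(&r->producer_lock);
    pthread_mutex_unlock(&r->consumer_lock);
    return old;
}

The tap.c hits further down show the caller side of the same API: build an array of ring pointers, resize them all in one call, then free the array.
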
/Linux-v5.4/drivers/crypto/inside-secure/
safexcel.c 51 for (i = 0; i < priv->config.rings; i++) { in eip197_trc_cache_setupvirt()
475 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_setup_cdesc_rings()
523 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_setup_rdesc_rings()
565 priv->config.pes, priv->config.rings); in safexcel_hw_init()
627 GENMASK(priv->config.rings - 1, 0), in safexcel_hw_init()
678 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_init()
704 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_init()
728 writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), in safexcel_hw_init()
732 writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), in safexcel_hw_init()
1255 priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings); in safexcel_configure()
[all …]
/Linux-v5.4/Documentation/devicetree/bindings/soc/qcom/
qcom,wcnss.txt 67 Definition: should reference the tx-enable and tx-rings-empty SMEM states
72 Definition: must contain "tx-enable" and "tx-rings-empty"
111 qcom,smem-state-names = "tx-enable", "tx-rings-empty";
/Linux-v5.4/Documentation/networking/device_drivers/google/
gve.rst 105 The descriptor rings are power-of-two-sized ring buffers consisting of
116 gve maps the buffers for transmit rings into a FIFO and copies the packets
121 The buffers for receive rings are put into a data ring that is the same
123 the rings together.
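
The gve notes describe power-of-two descriptor rings paired one-to-one with a data ring of the same size, so a masked free-running counter picks both a descriptor and its matching buffer. A minimal illustration; the types and names are not the gve driver's:

#include <stdint.h>

#define RX_RING_SIZE 512u                 /* power of two */

struct rx_desc { uint64_t buf_addr; };    /* device-visible descriptor */
struct rx_buf  { uint8_t data[2048]; };   /* matching receive buffer */

struct rx_ring {
    struct rx_desc desc[RX_RING_SIZE];    /* descriptor ring */
    struct rx_buf  buf[RX_RING_SIZE];     /* data ring, same size, tied 1:1 */
};

/* Masking replaces a modulo because the size is a power of two. */
static inline struct rx_buf *rx_slot_buf(struct rx_ring *r, uint32_t seq)
{
    return &r->buf[seq & (RX_RING_SIZE - 1)];
}
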
/Linux-v5.4/drivers/crypto/qat/qat_common/
adf_transport.c 277 ring = &bank->rings[ring_num]; in adf_create_ring()
340 adf_handle_response(&bank->rings[i]); in adf_ring_response_handler()
413 ring = &bank->rings[i]; in adf_init_bank()
427 tx_ring = &bank->rings[i - hw_data->tx_rx_gap]; in adf_init_bank()
442 ring = &bank->rings[i]; in adf_init_bank()
516 struct adf_etr_ring_data *ring = &bank->rings[i]; in cleanup_bank()
adf_transport_internal.h 76 struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK]; member
/Linux-v5.4/Documentation/devicetree/bindings/net/
opencores-ethoc.txt 6 first region is for the device registers and descriptor rings,
/Linux-v5.4/Documentation/devicetree/bindings/crypto/
mediatek-crypto.txt 7 order. These are global system and four descriptor rings.
brcm,spu-crypto.txt 15 Mailbox channels correspond to DMA rings on the device.
inside-secure-safexcel.txt 8 - interrupts: Interrupt numbers for the rings and engine.
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/lib/
hv_vhca.h 28 u16 rings; member
/Linux-v5.4/drivers/net/
tap.c 1295 struct ptr_ring **rings; in tap_queue_resize() local
1299 rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); in tap_queue_resize()
1300 if (!rings) in tap_queue_resize()
1304 rings[i++] = &q->ring; in tap_queue_resize()
1306 ret = ptr_ring_resize_multiple(rings, n, in tap_queue_resize()
1310 kfree(rings); in tap_queue_resize()
/Linux-v5.4/drivers/mailbox/
bcm-flexrm-mailbox.c 295 struct flexrm_ring *rings; member
941 ring = &mbox->rings[i]; in flexrm_write_config_in_seqfile()
969 ring = &mbox->rings[i]; in flexrm_write_stats_in_seqfile()
1487 struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index]; in flexrm_mbox_msi_write()
1548 mbox->rings = ring; in flexrm_mbox_probe()
1553 ring = &mbox->rings[index]; in flexrm_mbox_probe()
1612 ring = &mbox->rings[desc->platform.msi_index]; in flexrm_mbox_probe()
1647 mbox->controller.chans[index].con_priv = &mbox->rings[index]; in flexrm_mbox_probe()
