| /Linux-v5.10/drivers/net/wireless/broadcom/brcm80211/brcmfmac/ |
| D | flowring.c | 142 if (flow->rings[i] == NULL) in brcmf_flowring_create() 160 flow->rings[i] = ring; in brcmf_flowring_create() 172 ring = flow->rings[flowid]; in brcmf_flowring_tid() 192 ring = flow->rings[flowid]; in brcmf_flowring_block() 201 if ((flow->rings[i]) && (i != flowid)) { in brcmf_flowring_block() 202 ring = flow->rings[i]; in brcmf_flowring_block() 212 flow->rings[flowid]->blocked = blocked; in brcmf_flowring_block() 236 ring = flow->rings[flowid]; in brcmf_flowring_delete() 247 flow->rings[flowid] = NULL; in brcmf_flowring_delete() 264 ring = flow->rings[flowid]; in brcmf_flowring_enqueue() [all …]
|
| /Linux-v5.10/net/9p/ |
| D | trans_xen.c | 81 struct xen_9pfs_dataring *rings; member 158 ring = &priv->rings[num]; in p9_xen_request() 294 if (!priv->rings[i].intf) in xen_9pfs_front_free() 296 if (priv->rings[i].irq > 0) in xen_9pfs_front_free() 297 unbind_from_irqhandler(priv->rings[i].irq, priv->dev); in xen_9pfs_front_free() 298 if (priv->rings[i].data.in) { in xen_9pfs_front_free() 300 j < (1 << priv->rings[i].intf->ring_order); in xen_9pfs_front_free() 304 ref = priv->rings[i].intf->ref[j]; in xen_9pfs_front_free() 307 free_pages((unsigned long)priv->rings[i].data.in, in xen_9pfs_front_free() 308 priv->rings[i].intf->ring_order - in xen_9pfs_front_free() [all …]
|
| /Linux-v5.10/tools/lib/bpf/ |
| D | ringbuf.c | 36 struct ring *rings; member 81 tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings)); in ring_buffer__add() 84 rb->rings = tmp; in ring_buffer__add() 91 r = &rb->rings[rb->ring_cnt]; in ring_buffer__add() 151 ringbuf_unmap_ring(rb, &rb->rings[i]); in ring_buffer__free() 156 free(rb->rings); in ring_buffer__free() 255 struct ring *ring = &rb->rings[i]; in ring_buffer__consume() 276 struct ring *ring = &rb->rings[ring_id]; in ring_buffer__poll()
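The ringbuf.c hits are libbpf's user-space consumer for BPF ring buffer maps (ring_buffer__add, __consume, __poll, __free). As a rough illustration of how that API is usually driven from an application — the map fd and the handle_event() callback below are hypothetical, only the libbpf function names are real:

```c
/* Hedged sketch: consume a BPF ring buffer map with libbpf.
 * handle_event() and the map fd are illustrative. */
#include <bpf/libbpf.h>
#include <stdio.h>

static int handle_event(void *ctx, void *data, size_t size)
{
	/* one record produced by the BPF program */
	printf("got %zu bytes\n", size);
	return 0;
}

int consume_events(int ringbuf_map_fd)
{
	struct ring_buffer *rb;
	int err;

	rb = ring_buffer__new(ringbuf_map_fd, handle_event, NULL, NULL);
	if (!rb)
		return -1;

	/* ring_buffer__add() could attach further ringbuf maps to rb here */
	while ((err = ring_buffer__poll(rb, 100 /* ms */)) >= 0)
		;	/* callbacks fire from inside poll/consume */

	ring_buffer__free(rb);
	return err;
}
```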
|
| /Linux-v5.10/tools/testing/selftests/net/ |
| D | psock_fanout.c | 221 static int sock_fanout_read(int fds[], char *rings[], const int expect[]) in sock_fanout_read() argument 225 ret[0] = sock_fanout_read_ring(fds[0], rings[0]); in sock_fanout_read() 226 ret[1] = sock_fanout_read_ring(fds[1], rings[1]); in sock_fanout_read() 348 char *rings[2]; in test_datapath() local 367 rings[0] = sock_fanout_open_ring(fds[0]); in test_datapath() 368 rings[1] = sock_fanout_open_ring(fds[1]); in test_datapath() 371 sock_fanout_read(fds, rings, expect0); in test_datapath() 376 ret = sock_fanout_read(fds, rings, expect1); in test_datapath() 381 ret |= sock_fanout_read(fds, rings, expect2); in test_datapath() 383 if (munmap(rings[1], RING_NUM_FRAMES * getpagesize()) || in test_datapath() [all …]
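psock_fanout.c maps one packet RX ring per fanout socket (sock_fanout_open_ring) and unmaps them at the end of the test. A hedged sketch of how such a ring is typically attached with PACKET_RX_RING and mmap — the geometry below is arbitrary and must still satisfy the kernel's page/alignment rules, and this is not the selftest's exact helper:

```c
/* Hedged sketch: attach and map a PACKET_RX_RING, roughly what the
 * selftest's sock_fanout_open_ring() does. Geometry is illustrative. */
#include <linux/if_packet.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/socket.h>

static char *open_rx_ring(int fd, unsigned int frames, unsigned int frame_sz)
{
	struct tpacket_req req = {
		.tp_block_size = frames * frame_sz,	/* one block holding all frames */
		.tp_block_nr   = 1,
		.tp_frame_size = frame_sz,
		.tp_frame_nr   = frames,
	};
	char *ring;

	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)))
		return NULL;

	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	return ring == MAP_FAILED ? NULL : ring;
}
```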
|
| /Linux-v5.10/Documentation/mhi/ |
| D | mhi.rst | 58 Transfer rings: Used by the host to schedule work items for a channel. The 59 transfer rings are organized as a circular queue of Transfer Descriptors (TD). 64 Event rings: Used by the device to send completion and state transition messages 70 Command rings: Used by the host to send MHI commands to the device. The command 71 rings are organized as a circular queue of Command Descriptors (CD). 81 Two unidirectional channels with their associated transfer rings form a 87 Transfer rings 91 Transfer Descriptors (TD). TDs are managed through transfer rings, which are 101 Below is the basic usage of transfer rings: 110 buffer information, increments the WP to the next element and rings the [all …]
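The mhi.rst lines describe the host-side producer flow for a transfer ring: write buffer information into the element at the write pointer, advance the WP to the next element, then ring the channel doorbell. A generic, simplified sketch of that pattern — the structure and field names are made up for illustration, not the MHI driver's actual types:

```c
/* Illustrative circular-ring producer, loosely following the
 * transfer-ring flow described in mhi.rst. All names are invented. */
#include <linux/errno.h>
#include <linux/types.h>

struct demo_ring_el {
	u64 buf_addr;
	u32 len;
	u32 flags;
};

struct demo_ring {
	struct demo_ring_el *base;
	unsigned int size;	/* number of elements */
	unsigned int wp;	/* write pointer (host/producer) */
	unsigned int rp;	/* read pointer (device/consumer) */
};

static int demo_ring_queue(struct demo_ring *r, u64 addr, u32 len)
{
	unsigned int next = (r->wp + 1) % r->size;

	if (next == r->rp)		/* full: WP would catch up with RP */
		return -ENOSPC;

	r->base[r->wp].buf_addr = addr;
	r->base[r->wp].len = len;
	r->wp = next;			/* advance WP to the next element */

	/* the real driver would now update the ring context and write
	 * the channel doorbell register to notify the device */
	return 0;
}
```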
|
| /Linux-v5.10/Documentation/networking/ |
| D | af_xdp.rst | 24 syscall. Associated with each XSK are two rings: the RX ring and the 26 packets on the TX ring. These rings are registered and sized with the 28 to have at least one of these rings for each socket. An RX or TX 37 one of the rings references a frame by referencing its addr. The addr 42 UMEM also has two rings: the FILL ring and the COMPLETION ring. The 50 TX ring. In summary, the RX and FILL rings are used for the RX path 51 and the TX and COMPLETION rings are used for the TX path. 59 corresponding two rings, sets the XDP_SHARED_UMEM flag in the bind 65 process has to create its own socket with associated RX and TX rings, 67 reason that there is only one set of FILL and COMPLETION rings per [all …]
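The af_xdp.rst fragment notes that the RX/TX rings and the UMEM FILL/COMPLETION rings are registered and sized with setsockopts on the XSK. A minimal hedged sketch of that sizing step — error handling is trimmed, the ring sizes are arbitrary, and a real setup would also register the UMEM and mmap the rings afterwards:

```c
/* Hedged sketch: size the four AF_XDP rings on an XSK socket. */
#include <linux/if_xdp.h>
#include <sys/socket.h>

#ifndef SOL_XDP
#define SOL_XDP 283	/* from linux/socket.h, if libc doesn't define it */
#endif

static int xsk_size_rings(int xsk_fd)
{
	int rx = 2048, tx = 2048, fill = 4096, comp = 2048;

	if (setsockopt(xsk_fd, SOL_XDP, XDP_RX_RING, &rx, sizeof(rx)) ||
	    setsockopt(xsk_fd, SOL_XDP, XDP_TX_RING, &tx, sizeof(tx)) ||
	    setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_FILL_RING, &fill, sizeof(fill)) ||
	    setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &comp, sizeof(comp)))
		return -1;
	return 0;
}
```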
|
| /Linux-v5.10/drivers/soc/ti/ |
| D | k3-ringacc.c | 201 struct k3_ring *rings; member 309 !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED)) in k3_ringacc_request_ring() 311 else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED) in k3_ringacc_request_ring() 323 ringacc->rings[id].proxy_id = proxy_id; in k3_ringacc_request_ring() 332 ringacc->rings[id].use_count++; in k3_ringacc_request_ring() 334 return &ringacc->rings[id]; in k3_ringacc_request_ring() 1157 ringacc->rings = devm_kzalloc(dev, in k3_ringacc_init() 1158 sizeof(*ringacc->rings) * in k3_ringacc_init() 1168 if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse) in k3_ringacc_init() 1172 ringacc->rings[i].rt = base_rt + in k3_ringacc_init() [all …]
|
| /Linux-v5.10/drivers/block/xen-blkback/ |
| D | xenbus.c | 84 if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev) in xen_update_blkif_status() 110 ring = &blkif->rings[i]; in xen_update_blkif_status() 124 ring = &blkif->rings[i]; in xen_update_blkif_status() 134 blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring), in xen_blkif_alloc_rings() 136 if (!blkif->rings) in xen_blkif_alloc_rings() 140 struct xen_blkif_ring *ring = &blkif->rings[r]; in xen_blkif_alloc_rings() 269 struct xen_blkif_ring *ring = &blkif->rings[r]; in xen_blkif_disconnect() 332 kfree(blkif->rings); in xen_blkif_disconnect() 333 blkif->rings = NULL; in xen_blkif_disconnect() 383 if (!blkif->rings) \ [all …]
|
| /Linux-v5.10/include/linux/ |
| D | ptr_ring.h | 619 static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, in ptr_ring_resize_multiple() argument 639 spin_lock_irqsave(&(rings[i])->consumer_lock, flags); in ptr_ring_resize_multiple() 640 spin_lock(&(rings[i])->producer_lock); in ptr_ring_resize_multiple() 641 queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], in ptr_ring_resize_multiple() 643 spin_unlock(&(rings[i])->producer_lock); in ptr_ring_resize_multiple() 644 spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags); in ptr_ring_resize_multiple()
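ptr_ring_resize_multiple() resizes a batch of rings while holding each ring's consumer and producer locks, passing any entries that no longer fit to a destroy callback. The tap.c entry further down in these results is a real caller; a condensed hedged sketch of the same pattern, with an illustrative destroy callback:

```c
/* Hedged sketch of the ptr_ring_resize_multiple() pattern used by tap.c.
 * free_entry() is illustrative; callers pass their own destructor. */
#include <linux/ptr_ring.h>
#include <linux/slab.h>

static void free_entry(void *ptr)
{
	kfree(ptr);		/* invoked for entries dropped by the resize */
}

static int demo_resize(struct ptr_ring **rings, unsigned int nrings,
		       int new_size)
{
	/* swaps in a queue of new_size entries for every ring, under the
	 * producer/consumer locks shown in the fragments above */
	return ptr_ring_resize_multiple(rings, nrings, new_size,
					GFP_KERNEL, free_entry);
}
```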
|
| D | skb_array.h | 201 static inline int skb_array_resize_multiple(struct skb_array **rings, in skb_array_resize_multiple() argument 206 return ptr_ring_resize_multiple((struct ptr_ring **)rings, in skb_array_resize_multiple()
|
| /Linux-v5.10/drivers/crypto/inside-secure/ |
| D | safexcel.c | 51 for (i = 0; i < priv->config.rings; i++) { in eip197_trc_cache_setupvirt() 502 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_setup_cdesc_rings() 550 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_setup_rdesc_rings() 592 priv->config.pes, priv->config.rings); in safexcel_hw_init() 654 GENMASK(priv->config.rings - 1, 0), in safexcel_hw_init() 712 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_init() 738 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_init() 762 writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), in safexcel_hw_init() 766 writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), in safexcel_hw_init() 1339 priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings); in safexcel_configure() [all …]
|
| /Linux-v5.10/Documentation/devicetree/bindings/soc/qcom/ |
| D | qcom,wcnss.txt | 67 Definition: should reference the tx-enable and tx-rings-empty SMEM states 72 Definition: must contain "tx-enable" and "tx-rings-empty" 111 qcom,smem-state-names = "tx-enable", "tx-rings-empty";
|
| /Linux-v5.10/Documentation/networking/device_drivers/ethernet/google/ |
| D | gve.rst | 105 The descriptor rings are power-of-two-sized ring buffers consisting of 116 gve maps the buffers for transmit rings into a FIFO and copies the packets 121 The buffers for receive rings are put into a data ring that is the same 123 the rings together.
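gve.rst points out that the descriptor rings are power-of-two sized, which lets a free-running index be reduced to a slot with a mask instead of a modulo. A generic illustration of that property (not gve's actual code):

```c
/* Why power-of-two ring sizes are convenient: the slot for any
 * monotonically increasing index is just (index & mask). Generic sketch. */
#define RING_SIZE 256			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

struct demo_desc {
	unsigned long long addr;
	unsigned int len;
};

static struct demo_desc ring[RING_SIZE];

static struct demo_desc *slot_for(unsigned int idx)
{
	return &ring[idx & RING_MASK];	/* equivalent to idx % RING_SIZE */
}
```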
|
| /Linux-v5.10/drivers/crypto/qat/qat_common/ |
| D | adf_transport.c | 233 ring = &bank->rings[ring_num]; in adf_create_ring() 296 adf_handle_response(&bank->rings[i]); in adf_ring_response_handler() 369 ring = &bank->rings[i]; in adf_init_bank() 383 tx_ring = &bank->rings[i - hw_data->tx_rx_gap]; in adf_init_bank() 398 ring = &bank->rings[i]; in adf_init_bank() 472 struct adf_etr_ring_data *ring = &bank->rings[i]; in cleanup_bank()
|
| D | adf_transport_internal.h | 31 struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK]; member
|
| /Linux-v5.10/net/ethtool/ |
| D | Makefile | 8 linkstate.o debug.o wol.o features.o privflags.o rings.o \
|
| /Linux-v5.10/fs/ |
| D | io_uring.c | 292 struct io_rings *rings; member 1205 struct io_rings *rings = ctx->rings; in __io_commit_cqring() local 1208 smp_store_release(&rings->cq.tail, ctx->cached_cq_tail); in __io_commit_cqring() 1544 struct io_rings *r = ctx->rings; in io_sqring_full() 1551 struct io_rings *rings = ctx->rings; in io_get_cqring() local 1560 if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries) in io_get_cqring() 1564 return &rings->cqes[tail & ctx->cq_mask]; in io_get_cqring() 1571 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED) in io_should_trigger_evfd() 1593 ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW; in io_cqring_mark_overflow() 1628 struct io_rings *rings = ctx->rings; in io_cqring_overflow_flush() local [all …]
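The io_uring fragments show the kernel-side CQ ring bookkeeping: head and tail are free-running counters, the ring is full when tail minus head equals the entry count, and the slot for a new completion is found by masking the tail. A simplified, generic sketch of that arithmetic — the names and structures are illustrative, not io_uring's actual ones:

```c
/* Free-running head/tail counters over a power-of-two ring, mirroring the
 * full check and masked indexing visible in the io_uring fragments. */
#include <stddef.h>

struct demo_cqe {
	unsigned long long data;
};

struct demo_cq {
	unsigned int head;	/* advanced by the consumer */
	unsigned int tail;	/* advanced by the producer */
	unsigned int entries;	/* power of two */
	unsigned int mask;	/* entries - 1 */
	struct demo_cqe *cqes;
};

static struct demo_cqe *demo_cq_get(struct demo_cq *cq)
{
	unsigned int tail = cq->tail;

	/* unsigned wraparound makes tail - head the current occupancy */
	if (tail - cq->head == cq->entries)
		return NULL;			/* ring full */

	cq->tail = tail + 1;
	return &cq->cqes[tail & cq->mask];	/* slot for this entry */
}
```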
|
| /Linux-v5.10/drivers/gpu/drm/amd/amdgpu/ |
| D | amdgpu_ring.c | 190 adev->rings[ring->idx] = ring; in amdgpu_ring_init() 288 if (!(ring->adev) || !(ring->adev->rings[ring->idx])) in amdgpu_ring_fini() 307 ring->adev->rings[ring->idx] = NULL; in amdgpu_ring_fini()
|
| D | amdgpu_fence.c | 531 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_fini() 567 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_suspend() 602 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_resume() 707 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_debugfs_fence_info()
|
| D | amdgpu_test.c | 49 if (adev->rings[i]) in amdgpu_do_test_moves() 50 n -= adev->rings[i]->ring_size; in amdgpu_do_test_moves()
|
| /Linux-v5.10/Documentation/devicetree/bindings/net/ |
| D | opencores-ethoc.txt | 6 first region is for the device registers and descriptor rings,
|
| /Linux-v5.10/Documentation/devicetree/bindings/crypto/ |
| D | mediatek-crypto.txt | 7 order. These are global system and four descriptor rings.
|
| D | brcm,spu-crypto.txt | 15 Mailbox channels correspond to DMA rings on the device.
|
| /Linux-v5.10/drivers/net/ |
| D | tap.c | 1283 struct ptr_ring **rings; in tap_queue_resize() local 1287 rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); in tap_queue_resize() 1288 if (!rings) in tap_queue_resize() 1292 rings[i++] = &q->ring; in tap_queue_resize() 1294 ret = ptr_ring_resize_multiple(rings, n, in tap_queue_resize() 1298 kfree(rings); in tap_queue_resize()
|
| /Linux-v5.10/drivers/net/ethernet/mellanox/mlx5/core/lib/ |
| D | hv_vhca.h | 28 u16 rings; member
|