/Linux-v5.10/include/linux/soc/ti/ |
D | k3-ringacc.h | 3 * K3 Ring Accelerator (RA) subsystem interface 18 * RA ring operational modes 20 * @K3_RINGACC_RING_MODE_RING: Exposed Ring mode for SW direct access 43 * RA ring element's sizes in bytes. 60 * enum k3_ring_cfg - RA ring configuration structure 62 * @size: Ring size, number of elements 63 * @elm_size: Ring element size 64 * @mode: Ring operational mode 65 * @flags: Ring configuration flags. Possible values: 66 * @K3_RINGACC_RING_SHARED: when set allows to request the same ring [all …]
|
/Linux-v5.10/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_ring.c | 40 * Most engines on the GPU are fed via ring buffers. Ring 46 * pointers are equal, the ring is idle. When the host 47 * writes commands to the ring buffer, it increments the 53 * amdgpu_ring_alloc - allocate space on the ring buffer 56 * @ring: amdgpu_ring structure holding ring information 57 * @ndw: number of dwords to allocate in the ring buffer 59 * Allocate @ndw dwords in the ring buffer (all asics). 62 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) in amdgpu_ring_alloc() argument 66 ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask; in amdgpu_ring_alloc() 71 if (WARN_ON_ONCE(ndw > ring->max_dw)) in amdgpu_ring_alloc() [all …]
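The read/write-pointer scheme described above means every submission first reserves an aligned number of dwords. A minimal, self-contained sketch of the alignment step visible in amdgpu_ring_alloc(); the max_dw limit and sample values here are illustrative, not the driver's:

#include <stdio.h>

/* Round a dword count up to the ring's alignment (align_mask is the
 * alignment minus one, the alignment being a power of two), as
 * amdgpu_ring_alloc() does before checking the request against the
 * ring's capacity. */
static unsigned int align_ndw(unsigned int ndw, unsigned int align_mask)
{
        return (ndw + align_mask) & ~align_mask;
}

int main(void)
{
        unsigned int max_dw = 1024;           /* illustrative ring limit */
        unsigned int ndw = align_ndw(13, 7);  /* align to 8-dword packets */

        if (ndw > max_dw)
                return 1;                     /* request larger than the ring */
        printf("allocated %u dwords\n", ndw); /* prints 16 */
        return 0;
}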
|
D | jpeg_v1_0.c | 36 static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring); 38 static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_… in jpeg_v1_0_decode_ring_patch_wreg() argument 40 struct amdgpu_device *adev = ring->adev; in jpeg_v1_0_decode_ring_patch_wreg() 41 …ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACK… in jpeg_v1_0_decode_ring_patch_wreg() 44 ring->ring[(*ptr)++] = 0; in jpeg_v1_0_decode_ring_patch_wreg() 45 ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0); in jpeg_v1_0_decode_ring_patch_wreg() 47 ring->ring[(*ptr)++] = reg_offset; in jpeg_v1_0_decode_ring_patch_wreg() 48 ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0); in jpeg_v1_0_decode_ring_patch_wreg() 50 ring->ring[(*ptr)++] = val; in jpeg_v1_0_decode_ring_patch_wreg() 53 static void jpeg_v1_0_decode_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr) in jpeg_v1_0_decode_ring_set_patch_ring() argument [all …]
|
D | amdgpu_fence.c | 49 * are no longer in use by the associated ring on the GPU and 57 struct amdgpu_ring *ring; member 94 * @ring: ring the fence is associated with 99 static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq) in amdgpu_fence_write() argument 101 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_write() 110 * @ring: ring the fence is associated with 115 static u32 amdgpu_fence_read(struct amdgpu_ring *ring) in amdgpu_fence_read() argument 117 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_read() 129 * amdgpu_fence_emit - emit a fence on the requested ring 131 * @ring: ring the fence is associated with [all …]
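The fence driver's write/read helpers above store a per-ring sequence number in memory that the GPU updates as work completes. A hedged sketch of the usual wraparound-safe "has this sequence signaled" test such a scheme relies on; the names are illustrative, not amdgpu's:

#include <stdbool.h>
#include <stdint.h>

/* A fence is signaled once the value written back by the hardware
 * (last_completed) has reached its sequence number. Signed subtraction
 * keeps the test correct across 32-bit wraparound, assuming the two
 * counters never drift apart by more than 2^31. */
static bool fence_signaled(uint32_t seq, uint32_t last_completed)
{
        return (int32_t)(last_completed - seq) >= 0;
}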
|
D | amdgpu_ring.h | 74 /* Direct submission to the ring buffer during init and reset. */ 97 /* sync_seq is protected by ring emission lock */ 111 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring); 113 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, 115 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, 120 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, 122 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s, 124 bool amdgpu_fence_process(struct amdgpu_ring *ring); 125 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); 126 signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring, [all …]
|
D | jpeg_v2_0.c | 65 * Set ring and irq function pointers 89 struct amdgpu_ring *ring; in jpeg_v2_0_sw_init() local 106 ring = &adev->jpeg.inst->ring_dec; in jpeg_v2_0_sw_init() 107 ring->use_doorbell = true; in jpeg_v2_0_sw_init() 108 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; in jpeg_v2_0_sw_init() 109 sprintf(ring->name, "jpeg_dec"); in jpeg_v2_0_sw_init() 110 r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, in jpeg_v2_0_sw_init() 151 struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; in jpeg_v2_0_hw_init() local 154 adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, in jpeg_v2_0_hw_init() 157 r = amdgpu_ring_test_helper(ring); in jpeg_v2_0_hw_init() [all …]
|
/Linux-v5.10/drivers/gpu/drm/radeon/ |
D | radeon_ring.c | 38 * Most engines on the GPU are fed via ring buffers. Ring 44 * pointers are equal, the ring is idle. When the host 45 * writes commands to the ring buffer, it increments the 49 static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring); 52 * radeon_ring_supports_scratch_reg - check if the ring supports 56 * @ring: radeon_ring structure holding ring information 58 * Check if a specific ring supports writing to scratch registers (all asics). 59 * Returns true if the ring supports writing to scratch regs, false if not. 62 struct radeon_ring *ring) in radeon_ring_supports_scratch_reg() argument 64 switch (ring->idx) { in radeon_ring_supports_scratch_reg() [all …]
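The comment above describes the classic producer/consumer ring: the host advances the write pointer, the GPU advances the read pointer, and equal pointers mean idle. A self-contained sketch of the free-space computation that falls out of that scheme, assuming a power-of-two ring size in dwords with one slot kept unused:

/* Free dwords between the write pointer and the read pointer for a
 * power-of-two sized ring. Keeping one entry unused distinguishes the
 * "empty" case (rptr == wptr) from the "full" case. */
static unsigned int ring_free_dw(unsigned int rptr, unsigned int wptr,
                                 unsigned int ring_size)
{
        return (rptr + ring_size - wptr - 1) & (ring_size - 1);
}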
|
D | r600_dma.c | 35 * to the 3D engine (ring buffer, IBs, etc.), but the 47 * @ring: radeon ring pointer 52 struct radeon_ring *ring) in r600_dma_get_rptr() argument 57 rptr = rdev->wb.wb[ring->rptr_offs/4]; in r600_dma_get_rptr() 68 * @ring: radeon ring pointer 73 struct radeon_ring *ring) in r600_dma_get_wptr() argument 82 * @ring: radeon ring pointer 87 struct radeon_ring *ring) in r600_dma_set_wptr() argument 89 WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc); in r600_dma_set_wptr() 109 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; in r600_dma_stop() [all …]
|
D | cik_sdma.c | 43 * and each one supports 1 ring buffer used for gfx 47 * (ring buffer, IBs, etc.), but sDMA has its own 59 * @ring: radeon ring pointer 64 struct radeon_ring *ring) in cik_sdma_get_rptr() argument 69 rptr = rdev->wb.wb[ring->rptr_offs/4]; in cik_sdma_get_rptr() 71 if (ring->idx == R600_RING_TYPE_DMA_INDEX) in cik_sdma_get_rptr() 86 * @ring: radeon ring pointer 91 struct radeon_ring *ring) in cik_sdma_get_wptr() argument 95 if (ring->idx == R600_RING_TYPE_DMA_INDEX) in cik_sdma_get_wptr() 107 * @ring: radeon ring pointer [all …]
|
/Linux-v5.10/drivers/soc/ti/ |
D | k3-ringacc.c | 3 * TI K3 NAVSS Ring Accelerator subsystem driver 29 * @db: Ring Doorbell Register 31 * @occ: Ring Occupancy Register 32 * @indx: Ring Current Index Register 33 * @hwocc: Ring Hardware Occupancy Register 34 * @hwindx: Ring Hardware Current Index Register 49 * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region 51 * @head_data: Ring Head Entry Data Registers 52 * @tail_data: Ring Tail Entry Data Registers 53 * @peek_head_data: Ring Peek Head Entry Data Regs [all …]
|
/Linux-v5.10/drivers/net/wireless/broadcom/b43legacy/ |
D | dma.c | 32 struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring, in op32_idx2desc() argument 38 *meta = &(ring->meta[slot]); in op32_idx2desc() 39 desc = ring->descbase; in op32_idx2desc() 45 static void op32_fill_descriptor(struct b43legacy_dmaring *ring, in op32_fill_descriptor() argument 50 struct b43legacy_dmadesc32 *descbase = ring->descbase; in op32_fill_descriptor() 57 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); in op32_fill_descriptor() 62 addr |= ring->dev->dma.translation; in op32_fill_descriptor() 63 ctl = (bufsize - ring->frameoffset) in op32_fill_descriptor() 65 if (slot == ring->nr_slots - 1) in op32_fill_descriptor() 80 static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot) in op32_poke_tx() argument [all …]
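op32_fill_descriptor() above gives the last slot an extra control bit so the DMA engine wraps back to descriptor 0 instead of running off the end of the table. A self-contained model of that pattern; the struct and flag names here are made up, not b43legacy's:

#include <stdint.h>

#define DESC_CTL_END_OF_RING  (1u << 28)  /* hypothetical "table end" bit */

struct dma_desc_model {
        uint32_t control;
        uint32_t address;
};

/* Fill one slot; the last slot additionally carries the end-of-ring flag
 * so the hardware knows to wrap to slot 0 afterwards. */
static void fill_desc(struct dma_desc_model *ring, int nr_slots, int slot,
                      uint32_t addr, uint32_t ctl)
{
        if (slot == nr_slots - 1)
                ctl |= DESC_CTL_END_OF_RING;
        ring[slot].control = ctl;
        ring[slot].address = addr;
}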
|
/Linux-v5.10/drivers/net/wireless/broadcom/b43/ |
D | dma.c | 72 struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring, in op32_idx2desc() argument 78 *meta = &(ring->meta[slot]); in op32_idx2desc() 79 desc = ring->descbase; in op32_idx2desc() 85 static void op32_fill_descriptor(struct b43_dmaring *ring, in op32_fill_descriptor() argument 90 struct b43_dmadesc32 *descbase = ring->descbase; in op32_fill_descriptor() 97 B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); in op32_fill_descriptor() 99 addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW); in op32_fill_descriptor() 100 addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT); in op32_fill_descriptor() 103 if (slot == ring->nr_slots - 1) in op32_fill_descriptor() 118 static void op32_poke_tx(struct b43_dmaring *ring, int slot) in op32_poke_tx() argument [all …]
|
/Linux-v5.10/drivers/net/ethernet/apm/xgene/ |
D | xgene_enet_ring2.c | 12 static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) in xgene_enet_ring_init() argument 14 u32 *ring_cfg = ring->state; in xgene_enet_ring_init() 15 u64 addr = ring->dma; in xgene_enet_ring_init() 17 if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) { in xgene_enet_ring_init() 18 ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK); in xgene_enet_ring_init() 27 ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize) in xgene_enet_ring_init() 34 static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring) in xgene_enet_ring_set_type() argument 36 u32 *ring_cfg = ring->state; in xgene_enet_ring_set_type() 40 is_bufpool = xgene_enet_is_bufpool(ring->id); in xgene_enet_ring_set_type() 47 static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring) in xgene_enet_ring_set_recombbuf() argument [all …]
|
/Linux-v5.10/drivers/thunderbolt/ |
D | nhi.c | 26 #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring") argument 39 static int ring_interrupt_index(struct tb_ring *ring) in ring_interrupt_index() argument 41 int bit = ring->hop; in ring_interrupt_index() 42 if (!ring->is_tx) in ring_interrupt_index() 43 bit += ring->nhi->hop_count; in ring_interrupt_index() 48 * ring_interrupt_active() - activate/deactivate interrupts for a single ring 50 * ring->nhi->lock must be held. 52 static void ring_interrupt_active(struct tb_ring *ring, bool active) in ring_interrupt_active() argument 55 ring_interrupt_index(ring) / 32 * 4; in ring_interrupt_active() 56 int bit = ring_interrupt_index(ring) & 31; in ring_interrupt_active() [all …]
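ring_interrupt_index() above numbers TX rings first and RX rings after them, then spreads the per-ring enable bits across consecutive 32-bit registers. A sketch of that mapping with illustrative names:

#include <stdbool.h>

/* TX ring N uses bit N; RX ring N uses bit hop_count + N. */
static int ring_irq_index(int hop, bool is_tx, int hop_count)
{
        return is_tx ? hop : hop + hop_count;
}

/* The bits are spread over consecutive 32-bit registers: index / 32
 * selects the register (4 bytes apart), index & 31 selects the bit. */
static void ring_irq_reg_and_bit(int index, int *reg_offset, int *bit)
{
        *reg_offset = index / 32 * 4;
        *bit = index & 31;
}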
|
/Linux-v5.10/drivers/crypto/qat/qat_common/ |
D | adf_transport.c | 36 static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring) in adf_reserve_ring() argument 39 if (bank->ring_mask & (1 << ring)) { in adf_reserve_ring() 43 bank->ring_mask |= (1 << ring); in adf_reserve_ring() 48 static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring) in adf_unreserve_ring() argument 51 bank->ring_mask &= ~(1 << ring); in adf_unreserve_ring() 55 static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring) in adf_enable_ring_irq() argument 58 bank->irq_mask |= (1 << ring); in adf_enable_ring_irq() 65 static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring) in adf_disable_ring_irq() argument 68 bank->irq_mask &= ~(1 << ring); in adf_disable_ring_irq() 73 int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg) in adf_send_message() argument [all …]
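adf_reserve_ring()/adf_unreserve_ring() above are a tiny bitmap allocator over the bank's rings. A self-contained sketch of the same idea; the error code is illustrative and the caller is assumed to hold the bank lock:

#include <stdint.h>
#include <errno.h>

/* Claim ring number 'ring' in the bank's bitmap; fail if it is already
 * taken, mirroring the mask test in adf_reserve_ring(). */
static int reserve_ring(uint32_t *ring_mask, uint32_t ring)
{
        if (*ring_mask & (1u << ring))
                return -EBUSY;          /* illustrative error code */
        *ring_mask |= (1u << ring);
        return 0;
}

static void unreserve_ring(uint32_t *ring_mask, uint32_t ring)
{
        *ring_mask &= ~(1u << ring);
}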
|
/Linux-v5.10/drivers/gpu/drm/i915/gt/ |
D | intel_ring.c | 14 unsigned int intel_ring_update_space(struct intel_ring *ring) in intel_ring_update_space() argument 18 space = __intel_ring_space(ring->head, ring->emit, ring->size); in intel_ring_update_space() 20 ring->space = space; in intel_ring_update_space() 24 void __intel_ring_pin(struct intel_ring *ring) in __intel_ring_pin() argument 26 GEM_BUG_ON(!atomic_read(&ring->pin_count)); in __intel_ring_pin() 27 atomic_inc(&ring->pin_count); in __intel_ring_pin() 30 int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww) in intel_ring_pin() argument 32 struct i915_vma *vma = ring->vma; in intel_ring_pin() 37 if (atomic_fetch_inc(&ring->pin_count)) in intel_ring_pin() 40 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ in intel_ring_pin() [all …]
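intel_ring_pin() above follows the common "only the first pin does the real work" refcount pattern via atomic_fetch_inc(). A rough user-space model of that pattern using C11 atomics; the struct is a stand-in, not i915's:

#include <stdatomic.h>

struct ring_model {
        atomic_int pin_count;
        /* vma, mapping, etc. elided */
};

/* Only the 0 -> 1 transition actually maps/pins the backing storage;
 * later callers just bump the count. */
static int ring_pin(struct ring_model *ring)
{
        if (atomic_fetch_add(&ring->pin_count, 1) > 0)
                return 0;               /* already pinned */
        /* first user: acquire and map the buffer here */
        return 0;
}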
|
D | selftest_ring.c | 8 struct intel_ring *ring; in mock_ring() local 10 ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); in mock_ring() 11 if (!ring) in mock_ring() 14 kref_init(&ring->ref); in mock_ring() 15 ring->size = sz; in mock_ring() 16 ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(sz); in mock_ring() 17 ring->effective_size = sz; in mock_ring() 18 ring->vaddr = (void *)(ring + 1); in mock_ring() 19 atomic_set(&ring->pin_count, 1); in mock_ring() 21 intel_ring_update_space(ring); in mock_ring() [all …]
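mock_ring() above puts the bookkeeping struct and the ring storage in a single kzalloc() and points vaddr just past the struct. A self-contained sketch of that single-allocation trick:

#include <stdlib.h>

struct mock_ring_model {
        size_t size;
        void *vaddr;
        /* other bookkeeping elided */
};

/* One allocation holds both the bookkeeping struct and the buffer; the
 * buffer starts immediately after the struct, i.e. at (ring + 1). */
static struct mock_ring_model *mock_ring_alloc(size_t sz)
{
        struct mock_ring_model *ring = calloc(1, sizeof(*ring) + sz);

        if (!ring)
                return NULL;
        ring->size = sz;
        ring->vaddr = (void *)(ring + 1);
        return ring;
}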
|
/Linux-v5.10/drivers/crypto/inside-secure/ |
D | safexcel_ring.c | 21 /* Actual command descriptor ring */ in safexcel_init_ring_descriptors() 32 /* Command descriptor shadow ring for storing additional token data */ in safexcel_init_ring_descriptors() 78 struct safexcel_desc_ring *ring, in safexcel_ring_next_cwptr() argument 82 void *ptr = ring->write; in safexcel_ring_next_cwptr() 85 *atoken = ring->shwrite; in safexcel_ring_next_cwptr() 87 if ((ring->write == ring->read - ring->offset) || in safexcel_ring_next_cwptr() 88 (ring->read == ring->base && ring->write == ring->base_end)) in safexcel_ring_next_cwptr() 91 if (ring->write == ring->base_end) { in safexcel_ring_next_cwptr() 92 ring->write = ring->base; in safexcel_ring_next_cwptr() 93 ring->shwrite = ring->shbase; in safexcel_ring_next_cwptr() [all …]
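safexcel_ring_next_cwptr() above walks a descriptor ring kept as raw pointers: it wraps the write pointer at base_end and reports full when the producer would catch the consumer. A simplified, self-contained sketch of that discipline; the field names are shortened stand-ins:

#include <stddef.h>

struct desc_ring_model {
        char *base, *base_end;   /* first and last descriptor slot */
        char *read, *write;      /* consumer / producer positions  */
        size_t desc_size;
};

/* Return the next free slot or NULL if the ring is full. Full means the
 * producer sits one slot behind the consumer, including the wrapped case
 * where the consumer is at the first slot and the producer at the last. */
static void *ring_next_write(struct desc_ring_model *r)
{
        void *slot = r->write;
        int full = (r->read == r->base)
                 ? (r->write == r->base_end)
                 : (r->write == r->read - r->desc_size);

        if (full)
                return NULL;

        if (r->write == r->base_end)
                r->write = r->base;
        else
                r->write += r->desc_size;
        return slot;
}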
|
/Linux-v5.10/Documentation/devicetree/bindings/net/wireless/ |
D | qcom,ath11k.yaml | 33 - description: interrupt event for ring CE0 34 - description: interrupt event for ring CE1 35 - description: interrupt event for ring CE2 36 - description: interrupt event for ring CE3 37 - description: interrupt event for ring CE4 38 - description: interrupt event for ring CE5 39 - description: interrupt event for ring CE6 40 - description: interrupt event for ring CE7 41 - description: interrupt event for ring CE8 42 - description: interrupt event for ring CE9 [all …]
|
/Linux-v5.10/drivers/net/ethernet/mellanox/mlx4/ |
D | en_tx.c | 55 struct mlx4_en_tx_ring *ring; in mlx4_en_create_tx_ring() local 59 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node); in mlx4_en_create_tx_ring() 60 if (!ring) { in mlx4_en_create_tx_ring() 61 en_err(priv, "Failed allocating TX ring\n"); in mlx4_en_create_tx_ring() 65 ring->size = size; in mlx4_en_create_tx_ring() 66 ring->size_mask = size - 1; in mlx4_en_create_tx_ring() 67 ring->sp_stride = stride; in mlx4_en_create_tx_ring() 68 ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS; in mlx4_en_create_tx_ring() 71 ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node); in mlx4_en_create_tx_ring() 72 if (!ring->tx_info) { in mlx4_en_create_tx_ring() [all …]
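The TX ring above is sized to a power of two so that size_mask = size - 1 replaces a modulo with an AND; producer and consumer counters can then run free and are masked only when indexing the descriptor array. A self-contained sketch with illustrative names:

#include <stdint.h>

struct tx_ring_model {
        uint32_t size;       /* must be a power of two */
        uint32_t size_mask;  /* size - 1               */
        uint32_t prod, cons; /* free-running counters  */
};

/* Slot for the next descriptor; the counters themselves are never
 * wrapped, only their low bits are used to index the ring. */
static uint32_t next_tx_slot(struct tx_ring_model *ring)
{
        return ring->prod++ & ring->size_mask;
}

/* Entries currently in flight; unsigned subtraction stays correct even
 * after the 32-bit counters wrap. */
static uint32_t tx_ring_used(const struct tx_ring_model *ring)
{
        return ring->prod - ring->cons;
}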
|
/Linux-v5.10/drivers/gpu/drm/msm/ |
D | msm_ringbuffer.c | 13 struct msm_ringbuffer *ring; in msm_ringbuffer_new() local 20 ring = kzalloc(sizeof(*ring), GFP_KERNEL); in msm_ringbuffer_new() 21 if (!ring) { in msm_ringbuffer_new() 26 ring->gpu = gpu; in msm_ringbuffer_new() 27 ring->id = id; in msm_ringbuffer_new() 29 ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, in msm_ringbuffer_new() 31 gpu->aspace, &ring->bo, &ring->iova); in msm_ringbuffer_new() 33 if (IS_ERR(ring->start)) { in msm_ringbuffer_new() 34 ret = PTR_ERR(ring->start); in msm_ringbuffer_new() 35 ring->start = 0; in msm_ringbuffer_new() [all …]
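msm_ringbuffer_new() above checks the returned buffer with IS_ERR()/PTR_ERR(), the kernel helpers that encode a negative errno inside the pointer value so one return slot can carry either a valid pointer or an error. A rough user-space model of that encoding; the real helpers live in include/linux/err.h:

#define MAX_ERRNO_MODEL 4095

static inline void *err_ptr(long err)        { return (void *)err; }
static inline long  ptr_err(const void *ptr) { return (long)ptr; }

/* The last page of the address space is reserved for error codes, so any
 * pointer value in [-4095, -1] is treated as "this is an errno, not a
 * real pointer" - the same trick linux/err.h uses. */
static inline int is_err(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO_MODEL;
}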
|
/Linux-v5.10/drivers/net/wireless/ath/ath11k/ |
D | dbring.c | 10 struct ath11k_dbring *ring, in ath11k_dbring_bufs_replenish() argument 22 srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; in ath11k_dbring_bufs_replenish() 29 ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align); in ath11k_dbring_bufs_replenish() 30 paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz, in ath11k_dbring_bufs_replenish() 37 spin_lock_bh(&ring->idr_lock); in ath11k_dbring_bufs_replenish() 38 buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp); in ath11k_dbring_bufs_replenish() 39 spin_unlock_bh(&ring->idr_lock); in ath11k_dbring_bufs_replenish() 63 spin_lock_bh(&ring->idr_lock); in ath11k_dbring_bufs_replenish() 64 idr_remove(&ring->bufs_idr, buf_id); in ath11k_dbring_bufs_replenish() 65 spin_unlock_bh(&ring->idr_lock); in ath11k_dbring_bufs_replenish() [all …]
|
/Linux-v5.10/net/rds/ |
D | ib_ring.c | 66 void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr) in rds_ib_ring_init() argument 68 memset(ring, 0, sizeof(*ring)); in rds_ib_ring_init() 69 ring->w_nr = nr; in rds_ib_ring_init() 70 rdsdebug("ring %p nr %u\n", ring, ring->w_nr); in rds_ib_ring_init() 73 static inline u32 __rds_ib_ring_used(struct rds_ib_work_ring *ring) in __rds_ib_ring_used() argument 78 diff = ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr); in __rds_ib_ring_used() 79 BUG_ON(diff > ring->w_nr); in __rds_ib_ring_used() 84 void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr) in rds_ib_ring_resize() argument 88 BUG_ON(__rds_ib_ring_used(ring)); in rds_ib_ring_resize() 89 ring->w_nr = nr; in rds_ib_ring_resize() [all …]
|
/Linux-v5.10/tools/testing/selftests/net/ |
D | psock_tpacket.c | 66 struct ring { struct 71 void (*walk)(int sock, struct ring *ring); argument 220 static void walk_v1_v2_rx(int sock, struct ring *ring) in walk_v1_v2_rx() argument 227 bug_on(ring->type != PACKET_RX_RING); in walk_v1_v2_rx() 239 while (__v1_v2_rx_kernel_ready(ring->rd[frame_num].iov_base, in walk_v1_v2_rx() 240 ring->version)) { in walk_v1_v2_rx() 241 ppd.raw = ring->rd[frame_num].iov_base; in walk_v1_v2_rx() 243 switch (ring->version) { in walk_v1_v2_rx() 260 __v1_v2_rx_user_ready(ppd.raw, ring->version); in walk_v1_v2_rx() 262 frame_num = (frame_num + 1) % ring->rd_num; in walk_v1_v2_rx() [all …]
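walk_v1_v2_rx() above drains the mmap'ed AF_PACKET ring: the kernel marks a frame TP_STATUS_USER when it is ready, user space processes it and hands it back as TP_STATUS_KERNEL, then advances modulo the frame count. A minimal sketch of that handshake for TPACKET_V2; socket setup and the mmap itself are omitted:

#include <linux/if_packet.h>
#include <sys/uio.h>

/* rd[] points at the individual frames of the mapped RX ring, as in the
 * selftest; returns the number of frames consumed in this pass. */
static int drain_rx_ring(struct iovec *rd, unsigned int rd_num,
                         unsigned int *frame_num)
{
        int consumed = 0;

        while (1) {
                struct tpacket2_hdr *hdr = rd[*frame_num].iov_base;

                if (!(hdr->tp_status & TP_STATUS_USER))
                        break;                     /* ring empty for now */

                /* packet data starts at (char *)hdr + hdr->tp_mac */

                hdr->tp_status = TP_STATUS_KERNEL; /* hand frame back */
                __sync_synchronize();
                *frame_num = (*frame_num + 1) % rd_num;
                consumed++;
        }
        return consumed;
}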
|
/Linux-v5.10/drivers/net/ethernet/aquantia/atlantic/ |
D | aq_vec.c | 27 struct aq_ring_s ring[AQ_CFG_TCS_MAX][2]; member 37 struct aq_ring_s *ring = NULL; in aq_vec_poll() local 46 for (i = 0U, ring = self->ring[0]; in aq_vec_poll() 47 self->tx_rings > i; ++i, ring = self->ring[i]) { in aq_vec_poll() 48 u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp); in aq_vec_poll() 49 ring[AQ_VEC_RX_ID].stats.rx.polls++; in aq_vec_poll() 50 u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp); in aq_vec_poll() 54 &ring[AQ_VEC_TX_ID]); in aq_vec_poll() 59 if (ring[AQ_VEC_TX_ID].sw_head != in aq_vec_poll() 60 ring[AQ_VEC_TX_ID].hw_head) { in aq_vec_poll() [all …]
|