/Linux-v5.4/drivers/media/usb/uvc/uvc_queue.c
     36  uvc_queue_to_stream(struct uvc_video_queue *queue)  in uvc_queue_to_stream() argument
     38  return container_of(queue, struct uvc_streaming, queue);  in uvc_queue_to_stream()
     51  static void uvc_queue_return_buffers(struct uvc_video_queue *queue,  in uvc_queue_return_buffers() argument
     58  while (!list_empty(&queue->irqqueue)) {  in uvc_queue_return_buffers()
     59  struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,  in uvc_queue_return_buffers()
     61  queue);  in uvc_queue_return_buffers()
     62  list_del(&buf->queue);  in uvc_queue_return_buffers()
     76  struct uvc_video_queue *queue = vb2_get_drv_priv(vq);  in uvc_queue_setup() local
     86  stream = uvc_queue_to_stream(queue);  in uvc_queue_setup()
    107  struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);  in uvc_buffer_prepare() local
    [all …]

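The first two hits above illustrate a pattern that recurs throughout these results: uvc_queue_to_stream() uses container_of() to walk from an embedded struct uvc_video_queue back to the struct uvc_streaming that contains it. Below is a minimal, self-contained userspace sketch of the same idiom; the struct names are illustrative stand-ins, not the real UVC definitions.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct video_queue { int buf_count; };

/* Hypothetical parent structure embedding the queue, mirroring how
 * struct uvc_streaming embeds its struct uvc_video_queue. */
struct streaming {
        int id;
        struct video_queue queue;
};

static struct streaming *queue_to_stream(struct video_queue *q)
{
        return container_of(q, struct streaming, queue);
}

int main(void)
{
        struct streaming s = { .id = 7 };
        struct video_queue *q = &s.queue;   /* only the member pointer is passed around */

        printf("recovered id = %d\n", queue_to_stream(q)->id);   /* prints 7 */
        return 0;
}
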
/Linux-v5.4/drivers/usb/gadget/function/uvc_queue.c
     44  struct uvc_video_queue *queue = vb2_get_drv_priv(vq);  in uvc_queue_setup() local
     45  struct uvc_video *video = container_of(queue, struct uvc_video, queue);  in uvc_queue_setup()
     59  struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);  in uvc_buffer_prepare() local
     69  if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))  in uvc_buffer_prepare()
     85  struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);  in uvc_buffer_queue() local
     90  spin_lock_irqsave(&queue->irqlock, flags);  in uvc_buffer_queue()
     92  if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {  in uvc_buffer_queue()
     93  list_add_tail(&buf->queue, &queue->irqqueue);  in uvc_buffer_queue()
    102  spin_unlock_irqrestore(&queue->irqlock, flags);  in uvc_buffer_queue()
    113  int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,  in uvcg_queue_init() argument
    [all …]

/Linux-v5.4/drivers/net/wireless/st/cw1200/queue.c
     26  static inline void __cw1200_queue_lock(struct cw1200_queue *queue)  in __cw1200_queue_lock() argument
     28  struct cw1200_queue_stats *stats = queue->stats;  in __cw1200_queue_lock()
     29  if (queue->tx_locked_cnt++ == 0) {  in __cw1200_queue_lock()
     31  queue->queue_id);  in __cw1200_queue_lock()
     32  ieee80211_stop_queue(stats->priv->hw, queue->queue_id);  in __cw1200_queue_lock()
     36  static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)  in __cw1200_queue_unlock() argument
     38  struct cw1200_queue_stats *stats = queue->stats;  in __cw1200_queue_unlock()
     39  BUG_ON(!queue->tx_locked_cnt);  in __cw1200_queue_unlock()
     40  if (--queue->tx_locked_cnt == 0) {  in __cw1200_queue_unlock()
     42  queue->queue_id);  in __cw1200_queue_unlock()
    [all …]

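The cw1200 fragments show a transition-gated nesting counter: the mac80211 queue is stopped only when tx_locked_cnt goes from 0 to 1 and woken again only when it drops back to 0, so nested lock/unlock pairs are harmless. A small self-contained sketch of that pattern follows; the stop/wake helpers are illustrative placeholders, not the ieee80211_* API.

#include <assert.h>
#include <stdio.h>

struct txq {
        int id;
        unsigned int tx_locked_cnt;   /* nesting depth of lock requests */
};

static void stop_hw_queue(int id)  { printf("stop queue %d\n", id); }
static void wake_hw_queue(int id)  { printf("wake queue %d\n", id); }

/* Stop the hardware queue only on the first (outermost) lock. */
static void queue_lock(struct txq *q)
{
        if (q->tx_locked_cnt++ == 0)
                stop_hw_queue(q->id);
}

/* Wake it again only when the outermost unlock drops the count to zero. */
static void queue_unlock(struct txq *q)
{
        assert(q->tx_locked_cnt);             /* mirrors the BUG_ON() in the driver */
        if (--q->tx_locked_cnt == 0)
                wake_hw_queue(q->id);
}

int main(void)
{
        struct txq q = { .id = 2 };
        queue_lock(&q);     /* stops the queue */
        queue_lock(&q);     /* nested, no effect */
        queue_unlock(&q);   /* still locked */
        queue_unlock(&q);   /* wakes the queue */
        return 0;
}
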
/Linux-v5.4/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
     11  int vchiu_queue_init(struct vchiu_queue *queue, int size)  in vchiu_queue_init() argument
     15  queue->size = size;  in vchiu_queue_init()
     16  queue->read = 0;  in vchiu_queue_init()
     17  queue->write = 0;  in vchiu_queue_init()
     18  queue->initialized = 1;  in vchiu_queue_init()
     20  init_completion(&queue->pop);  in vchiu_queue_init()
     21  init_completion(&queue->push);  in vchiu_queue_init()
     23  queue->storage = kcalloc(size, sizeof(struct vchiq_header *),  in vchiu_queue_init()
     25  if (!queue->storage) {  in vchiu_queue_init()
     26  vchiu_queue_delete(queue);  in vchiu_queue_init()
    [all …]

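vchiu_queue_init() sets up a bounded ring of message pointers with separate read and write cursors plus two completions that block producers and consumers. The sketch below covers only the index arithmetic of such a ring, under the assumption that the cursors count monotonically and are reduced modulo the size on access; the completion-based blocking and the real vchiq_header element type are omitted.

#include <stdlib.h>
#include <string.h>

struct ring {
        unsigned int size;     /* capacity in slots */
        unsigned int read;     /* monotonically increasing pop cursor */
        unsigned int write;    /* monotonically increasing push cursor */
        void **storage;        /* 'size' element pointers, like queue->storage */
};

static int ring_init(struct ring *q, unsigned int size)
{
        memset(q, 0, sizeof(*q));
        q->size = size;
        q->storage = calloc(size, sizeof(*q->storage));
        return q->storage ? 0 : -1;
}

/* Fill level is write - read; cursors are reduced modulo size on access. */
static int ring_push(struct ring *q, void *item)
{
        if (q->write - q->read == q->size)
                return -1;                            /* full */
        q->storage[q->write++ % q->size] = item;
        return 0;
}

static void *ring_pop(struct ring *q)
{
        if (q->write == q->read)
                return NULL;                          /* empty */
        return q->storage[q->read++ % q->size];
}

static void ring_delete(struct ring *q)
{
        free(q->storage);
        q->storage = NULL;
}
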
/Linux-v5.4/drivers/net/wireless/broadcom/b43legacy/pio.c
     22  static void tx_start(struct b43legacy_pioqueue *queue)  in tx_start() argument
     24  b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,  in tx_start()
     28  static void tx_octet(struct b43legacy_pioqueue *queue,  in tx_octet() argument
     31  if (queue->need_workarounds) {  in tx_octet()
     32  b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);  in tx_octet()
     33  b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,  in tx_octet()
     36  b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,  in tx_octet()
     38  b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);  in tx_octet()
     63  static void tx_data(struct b43legacy_pioqueue *queue,  in tx_data() argument
     71  if (queue->need_workarounds) {  in tx_data()
    [all …]

/Linux-v5.4/drivers/iio/buffer/industrialio-buffer-dma.c
    101  dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),  in iio_buffer_block_release()
    104  iio_buffer_put(&block->queue->buffer);  in iio_buffer_block_release()
    167  struct iio_dma_buffer_queue *queue, size_t size)  in iio_dma_buffer_alloc_block() argument
    175  block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),  in iio_dma_buffer_alloc_block()
    184  block->queue = queue;  in iio_dma_buffer_alloc_block()
    188  iio_buffer_get(&queue->buffer);  in iio_dma_buffer_alloc_block()
    195  struct iio_dma_buffer_queue *queue = block->queue;  in _iio_dma_buffer_block_done() local
    203  list_add_tail(&block->head, &queue->outgoing);  in _iio_dma_buffer_block_done()
    216  struct iio_dma_buffer_queue *queue = block->queue;  in iio_dma_buffer_block_done() local
    219  spin_lock_irqsave(&queue->list_lock, flags);  in iio_dma_buffer_block_done()
    [all …]

/Linux-v5.4/drivers/net/xen-netback/rx.c
     36  static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)  in xenvif_rx_ring_slots_available() argument
     42  skb = skb_peek(&queue->rx_queue);  in xenvif_rx_ring_slots_available()
     53  prod = queue->rx.sring->req_prod;  in xenvif_rx_ring_slots_available()
     54  cons = queue->rx.req_cons;  in xenvif_rx_ring_slots_available()
     59  queue->rx.sring->req_event = prod + 1;  in xenvif_rx_ring_slots_available()
     65  } while (queue->rx.sring->req_prod != prod);  in xenvif_rx_ring_slots_available()
     70  void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)  in xenvif_rx_queue_tail() argument
     74  spin_lock_irqsave(&queue->rx_queue.lock, flags);  in xenvif_rx_queue_tail()
     76  __skb_queue_tail(&queue->rx_queue, skb);  in xenvif_rx_queue_tail()
     78  queue->rx_queue_len += skb->len;  in xenvif_rx_queue_tail()
    [all …]

/Linux-v5.4/drivers/net/xen-netback/netback.c
     99  static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
    102  static void make_tx_response(struct xenvif_queue *queue,
    106  static void push_tx_responses(struct xenvif_queue *queue);
    108  static inline int tx_work_todo(struct xenvif_queue *queue);
    110  static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,  in idx_to_pfn() argument
    113  return page_to_pfn(queue->mmap_pages[idx]);  in idx_to_pfn()
    116  static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,  in idx_to_kaddr() argument
    119  return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));  in idx_to_kaddr()
    152  void xenvif_kick_thread(struct xenvif_queue *queue)  in xenvif_kick_thread() argument
    154  wake_up(&queue->wq);  in xenvif_kick_thread()
    [all …]

/Linux-v5.4/drivers/net/xen-netback/interface.c
     55  void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,  in xenvif_skb_zerocopy_prepare() argument
     59  atomic_inc(&queue->inflight_packets);  in xenvif_skb_zerocopy_prepare()
     62  void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)  in xenvif_skb_zerocopy_complete() argument
     64  atomic_dec(&queue->inflight_packets);  in xenvif_skb_zerocopy_complete()
     70  wake_up(&queue->dealloc_wq);  in xenvif_skb_zerocopy_complete()
     82  struct xenvif_queue *queue = dev_id;  in xenvif_tx_interrupt() local
     84  if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))  in xenvif_tx_interrupt()
     85  napi_schedule(&queue->napi);  in xenvif_tx_interrupt()
     92  struct xenvif_queue *queue =  in xenvif_poll() local
    100  if (unlikely(queue->vif->disabled)) {  in xenvif_poll()
    [all …]

/Linux-v5.4/drivers/nvme/target/tcp.c
     46  struct nvmet_tcp_queue *queue;  member
    150  static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,  in nvmet_tcp_cmd_tag() argument
    153  return cmd - queue->cmds;  in nvmet_tcp_cmd_tag()
    181  nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)  in nvmet_tcp_get_cmd() argument
    185  cmd = list_first_entry_or_null(&queue->free_list,  in nvmet_tcp_get_cmd()
    201  if (unlikely(cmd == &cmd->queue->connect))  in nvmet_tcp_put_cmd()
    204  list_add_tail(&cmd->entry, &cmd->queue->free_list);  in nvmet_tcp_put_cmd()
    207  static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)  in nvmet_tcp_hdgst_len() argument
    209  return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;  in nvmet_tcp_hdgst_len()
    212  static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)  in nvmet_tcp_ddgst_len() argument
    [all …]

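nvmet_tcp_cmd_tag() derives a command's tag purely from pointer arithmetic: commands live in one contiguous array (queue->cmds), so subtracting the array base from a command pointer yields its index. The NVMe host TCP driver further down uses the same trick in nvme_tcp_queue_id(). A tiny self-contained illustration, with made-up struct fields:

#include <stdint.h>
#include <stdio.h>

struct cmd { int opcode; };

struct cmd_queue {
        struct cmd cmds[64];    /* contiguous command array, as in queue->cmds */
};

/* The tag of a command is simply its offset within the array. */
static uint16_t cmd_tag(const struct cmd_queue *q, const struct cmd *c)
{
        return (uint16_t)(c - q->cmds);   /* pointer difference in elements, not bytes */
}

int main(void)
{
        struct cmd_queue q;
        struct cmd *c = &q.cmds[17];

        printf("tag = %u\n", cmd_tag(&q, c));   /* prints 17 */
        return 0;
}
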
/Linux-v5.4/drivers/nvme/target/rdma.c
     40  struct nvmet_rdma_queue *queue;  member
     54  struct nvmet_rdma_queue *queue;  member
    133  static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
    168  nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)  in nvmet_rdma_get_rsp() argument
    173  spin_lock_irqsave(&queue->rsps_lock, flags);  in nvmet_rdma_get_rsp()
    174  rsp = list_first_entry_or_null(&queue->free_rsps,  in nvmet_rdma_get_rsp()
    178  spin_unlock_irqrestore(&queue->rsps_lock, flags);  in nvmet_rdma_get_rsp()
    186  ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);  in nvmet_rdma_get_rsp()
    204  nvmet_rdma_free_rsp(rsp->queue->dev, rsp);  in nvmet_rdma_put_rsp()
    209  spin_lock_irqsave(&rsp->queue->rsps_lock, flags);  in nvmet_rdma_put_rsp()
    [all …]

/Linux-v5.4/drivers/misc/genwqe/card_ddcb.c
     82  static int queue_empty(struct ddcb_queue *queue)  in queue_empty() argument
     84  return queue->ddcb_next == queue->ddcb_act;  in queue_empty()
     87  static int queue_enqueued_ddcbs(struct ddcb_queue *queue)  in queue_enqueued_ddcbs() argument
     89  if (queue->ddcb_next >= queue->ddcb_act)  in queue_enqueued_ddcbs()
     90  return queue->ddcb_next - queue->ddcb_act;  in queue_enqueued_ddcbs()
     92  return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next);  in queue_enqueued_ddcbs()
     95  static int queue_free_ddcbs(struct ddcb_queue *queue)  in queue_free_ddcbs() argument
     97  int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1;  in queue_free_ddcbs()
    163  static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue)  in print_ddcb_info()
    174  cd->card_idx, queue->ddcb_act, queue->ddcb_next);  in print_ddcb_info()
    [all …]

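The genwqe fragments spell out the standard occupancy math for a circular descriptor ring tracked by two cursors: when the next cursor is at or ahead of the active cursor, the difference is the fill level; otherwise it has wrapped and must be corrected by the ring size, and one slot is always kept free so a full ring is distinguishable from an empty one. A compact, self-contained version of that arithmetic:

#include <assert.h>

/* Occupancy of a ring of 'max' slots given two wrapping cursors, mirroring
 * queue_enqueued_ddcbs() / queue_free_ddcbs() in the driver. */
static int ring_enqueued(int next, int act, int max)
{
        if (next >= act)
                return next - act;          /* no wraparound */
        return max - (act - next);          /* 'next' wrapped past the end */
}

static int ring_free(int next, int act, int max)
{
        /* One slot stays unused so full and empty are distinguishable. */
        return max - ring_enqueued(next, act, max) - 1;
}

int main(void)
{
        assert(ring_enqueued(5, 2, 8) == 3);   /* simple case */
        assert(ring_enqueued(1, 6, 8) == 3);   /* wrapped case */
        assert(ring_free(1, 6, 8) == 4);
        return 0;
}
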
/Linux-v5.4/drivers/crypto/cavium/zip/zip_device.c
     57  static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue)  in zip_cmd_queue_consumed() argument
     59  return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *  in zip_cmd_queue_consumed()
     81  u32 queue = 0;  in zip_load_instr() local
     91  queue = 0;  in zip_load_instr()
     93  queue = 1;  in zip_load_instr()
     95  zip_dbg("CPU Core: %d Queue number:%d", raw_smp_processor_id(), queue);  in zip_load_instr()
     98  spin_lock(&zip_dev->iq[queue].lock);  in zip_load_instr()
    109  zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head);  in zip_load_instr()
    110  zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail);  in zip_load_instr()
    112  consumed = zip_cmd_queue_consumed(zip_dev, queue);  in zip_load_instr()
    [all …]

/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
     36  msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)  in msg_queue_open() argument
     40  mutex_lock(&queue->mutex);  in msg_queue_open()
     42  queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg);  in msg_queue_open()
     48  msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,  in msg_queue_close() argument
     54  nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position);  in msg_queue_close()
     56  mutex_unlock(&queue->mutex);  in msg_queue_close()
     60  msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)  in msg_queue_empty() argument
     65  head = nvkm_falcon_rd32(falcon, queue->head_reg);  in msg_queue_empty()
     66  tail = nvkm_falcon_rd32(falcon, queue->tail_reg);  in msg_queue_empty()
     72  msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,  in msg_queue_pop() argument
    [all …]

/Linux-v5.4/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
     37  void vmw_marker_queue_init(struct vmw_marker_queue *queue)  in vmw_marker_queue_init() argument
     39  INIT_LIST_HEAD(&queue->head);  in vmw_marker_queue_init()
     40  queue->lag = 0;  in vmw_marker_queue_init()
     41  queue->lag_time = ktime_get_raw_ns();  in vmw_marker_queue_init()
     42  spin_lock_init(&queue->lock);  in vmw_marker_queue_init()
     45  void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)  in vmw_marker_queue_takedown() argument
     49  spin_lock(&queue->lock);  in vmw_marker_queue_takedown()
     50  list_for_each_entry_safe(marker, next, &queue->head, head) {  in vmw_marker_queue_takedown()
     53  spin_unlock(&queue->lock);  in vmw_marker_queue_takedown()
     56  int vmw_marker_push(struct vmw_marker_queue *queue,  in vmw_marker_push() argument
    [all …]

/Linux-v5.4/drivers/net/xen-netfront.c
    205  static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,  in xennet_get_rx_skb() argument
    209  struct sk_buff *skb = queue->rx_skbs[i];  in xennet_get_rx_skb()
    210  queue->rx_skbs[i] = NULL;  in xennet_get_rx_skb()
    214  static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,  in xennet_get_rx_ref() argument
    218  grant_ref_t ref = queue->grant_rx_ref[i];  in xennet_get_rx_ref()
    219  queue->grant_rx_ref[i] = GRANT_INVALID_REF;  in xennet_get_rx_ref()
    235  struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);  in rx_refill_timeout() local
    236  napi_schedule(&queue->napi);  in rx_refill_timeout()
    239  static int netfront_tx_slot_available(struct netfront_queue *queue)  in netfront_tx_slot_available() argument
    241  return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <  in netfront_tx_slot_available()
    [all …]

/Linux-v5.4/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
     25  struct data_queue *queue = entry->queue;  in rt2x00queue_alloc_rxskb() local
     26  struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;  in rt2x00queue_alloc_rxskb()
     37  frame_size = queue->data_size + queue->desc_size + queue->winfo_size;  in rt2x00queue_alloc_rxskb()
     95  struct device *dev = entry->queue->rt2x00dev->dev;  in rt2x00queue_map_txskb()
    112  struct device *dev = entry->queue->rt2x00dev->dev;  in rt2x00queue_unmap_skb()
    490  struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;  in rt2x00queue_write_tx_data()
    502  entry->queue->qid, DRV_PROJECT);  in rt2x00queue_write_tx_data()
    531  struct data_queue *queue = entry->queue;  in rt2x00queue_write_tx_descriptor() local
    533  queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);  in rt2x00queue_write_tx_descriptor()
    539  rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);  in rt2x00queue_write_tx_descriptor()
    [all …]

/Linux-v5.4/drivers/nvme/host/tcp.c
     33  struct nvme_tcp_queue *queue;  member
    130  static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)  in nvme_tcp_queue_id() argument
    132  return queue - queue->ctrl->queues;  in nvme_tcp_queue_id()
    135  static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)  in nvme_tcp_tagset() argument
    137  u32 queue_idx = nvme_tcp_queue_id(queue);  in nvme_tcp_tagset()
    140  return queue->ctrl->admin_tag_set.tags[queue_idx];  in nvme_tcp_tagset()
    141  return queue->ctrl->tag_set.tags[queue_idx - 1];  in nvme_tcp_tagset()
    144  static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)  in nvme_tcp_hdgst_len() argument
    146  return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;  in nvme_tcp_hdgst_len()
    149  static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)  in nvme_tcp_ddgst_len() argument
    [all …]

/Linux-v5.4/drivers/scsi/arm/queue.c
     55  int queue_initialise (Queue_t *queue)  in queue_initialise() argument
     60  spin_lock_init(&queue->queue_lock);  in queue_initialise()
     61  INIT_LIST_HEAD(&queue->head);  in queue_initialise()
     62  INIT_LIST_HEAD(&queue->free);  in queue_initialise()
     70  queue->alloc = q = kmalloc_array(nqueues, sizeof(QE_t), GFP_KERNEL);  in queue_initialise()
     75  list_add(&q->list, &queue->free);  in queue_initialise()
     79  return queue->alloc != NULL;  in queue_initialise()
     87  void queue_free (Queue_t *queue)  in queue_free() argument
     89  if (!list_empty(&queue->head))  in queue_free()
     90  printk(KERN_WARNING "freeing non-empty queue %p\n", queue);  in queue_free()
    [all …]

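queue_initialise() preallocates every queue entry in a single kmalloc_array() block and threads them all onto a free list, so enqueueing later never needs to allocate. Below is a simplified userspace sketch of that pool-plus-free-list setup; it uses a plain singly linked list instead of the kernel's list_head, and the names are illustrative.

#include <stdlib.h>

struct entry {
        struct entry *next;
        void *data;
};

struct queue {
        struct entry *free;    /* entries available for use */
        struct entry *alloc;   /* single backing allocation, freed at teardown */
};

/* Preallocate 'n' entries in one block and put them all on the free list. */
static int queue_initialise(struct queue *q, unsigned int n)
{
        unsigned int i;

        q->free = NULL;
        q->alloc = calloc(n, sizeof(*q->alloc));
        if (!q->alloc)
                return -1;

        for (i = 0; i < n; i++) {
                q->alloc[i].next = q->free;
                q->free = &q->alloc[i];
        }
        return 0;
}

static void queue_free(struct queue *q)
{
        free(q->alloc);        /* releases every entry at once */
        q->alloc = NULL;
        q->free = NULL;
}
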
/Linux-v5.4/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
    196  static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)  in hw_qeit_calc() argument
    200  if (q_offset >= queue->queue_length)  in hw_qeit_calc()
    201  q_offset -= queue->queue_length;  in hw_qeit_calc()
    202  current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];  in hw_qeit_calc()
    206  static inline void *hw_qeit_get(struct hw_queue *queue)  in hw_qeit_get() argument
    208  return hw_qeit_calc(queue, queue->current_q_offset);  in hw_qeit_get()
    211  static inline void hw_qeit_inc(struct hw_queue *queue)  in hw_qeit_inc() argument
    213  queue->current_q_offset += queue->qe_size;  in hw_qeit_inc()
    214  if (queue->current_q_offset >= queue->queue_length) {  in hw_qeit_inc()
    215  queue->current_q_offset = 0;  in hw_qeit_inc()
    [all …]

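The ehea helpers address a queue whose storage is scattered across multiple pages: the high bits of a byte offset select an entry of queue_pages[] and the low bits index into that page, while the running offset wraps back to zero once it reaches queue_length. A self-contained sketch of that offset-to-address translation, with the page size and field names chosen for the example:

#include <stdint.h>
#include <stddef.h>

#define PAGE_SHIFT 12u                       /* 4 KiB pages, analogous to EHEA_PAGESHIFT */
#define PAGE_SIZE  (1u << PAGE_SHIFT)

struct hw_queue {
        void **queue_pages;      /* array of per-page base addresses */
        uint64_t queue_length;   /* total queue size in bytes */
        uint64_t current_q_offset;
        uint32_t qe_size;        /* size of one queue element */
};

/* Map a byte offset to a virtual address inside the right page. */
static void *qeit_calc(struct hw_queue *q, uint64_t off)
{
        if (off >= q->queue_length)
                off -= q->queue_length;              /* wrap once past the end */
        return (char *)q->queue_pages[off >> PAGE_SHIFT] + (off & (PAGE_SIZE - 1));
}

static void *qeit_get(struct hw_queue *q)
{
        return qeit_calc(q, q->current_q_offset);
}

/* Advance to the next element, wrapping to offset 0 at the end of the queue. */
static void qeit_inc(struct hw_queue *q)
{
        q->current_q_offset += q->qe_size;
        if (q->current_q_offset >= q->queue_length)
                q->current_q_offset = 0;
}
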
/Linux-v5.4/drivers/crypto/hisilicon/sec/sec_drv.c
    227  static int sec_queue_map_io(struct sec_queue *queue)  in sec_queue_map_io() argument
    229  struct device *dev = queue->dev_info->dev;  in sec_queue_map_io()
    234  2 + queue->queue_id);  in sec_queue_map_io()
    237  queue->queue_id);  in sec_queue_map_io()
    240  queue->regs = ioremap(res->start, resource_size(res));  in sec_queue_map_io()
    241  if (!queue->regs)  in sec_queue_map_io()
    247  static void sec_queue_unmap_io(struct sec_queue *queue)  in sec_queue_unmap_io() argument
    249  iounmap(queue->regs);  in sec_queue_unmap_io()
    252  static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg)  in sec_queue_ar_pkgattr() argument
    254  void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;  in sec_queue_ar_pkgattr()
    [all …]

/Linux-v5.4/drivers/soc/ixp4xx/ixp4xx-qmgr.c
     28  void qmgr_put_entry(unsigned int queue, u32 val)  in qmgr_put_entry() argument
     31  BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */  in qmgr_put_entry()
     34  qmgr_queue_descs[queue], queue, val);  in qmgr_put_entry()
     36  __raw_writel(val, &qmgr_regs->acc[queue][0]);  in qmgr_put_entry()
     39  u32 qmgr_get_entry(unsigned int queue)  in qmgr_get_entry() argument
     42  val = __raw_readl(&qmgr_regs->acc[queue][0]);  in qmgr_get_entry()
     44  BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */  in qmgr_get_entry()
     47  qmgr_queue_descs[queue], queue, val);  in qmgr_get_entry()
     52  static int __qmgr_get_stat1(unsigned int queue)  in __qmgr_get_stat1() argument
     54  return (__raw_readl(&qmgr_regs->stat1[queue >> 3])  in __qmgr_get_stat1()
    [all …]

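__qmgr_get_stat1() indexes stat1[queue >> 3], i.e. eight queues share each 32-bit status word, which implies four status bits per queue. The visible fragment is cut off before the shift and mask, so the extraction below is an assumed reconstruction for illustration, not the driver's verbatim code.

#include <stdint.h>

/* Assumed layout: 8 queues per 32-bit status word, 4 status bits each.
 * Only the stat1[queue >> 3] indexing is visible in the fragment above;
 * the shift and mask here are an illustrative guess. */
static unsigned int queue_stat_nibble(const volatile uint32_t *stat1, unsigned int queue)
{
        uint32_t word = stat1[queue >> 3];            /* which 32-bit word */
        unsigned int shift = (queue & 7) * 4;         /* which nibble inside it */

        return (word >> shift) & 0xF;
}
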
/Linux-v5.4/net/sunrpc/sched.c
     84  __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)  in __rpc_disable_timer() argument
     91  if (list_empty(&queue->timer_list.list))  in __rpc_disable_timer()
     92  cancel_delayed_work(&queue->timer_list.dwork);  in __rpc_disable_timer()
     96  rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)  in rpc_set_queue_timer() argument
     99  queue->timer_list.expires = expires;  in rpc_set_queue_timer()
    104  mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);  in rpc_set_queue_timer()
    111  __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,  in __rpc_add_timer() argument
    118  if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))  in __rpc_add_timer()
    119  rpc_set_queue_timer(queue, timeout);  in __rpc_add_timer()
    120  list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);  in __rpc_add_timer()
    [all …]

/Linux-v5.4/include/drm/spsc_queue.h
     48  static inline void spsc_queue_init(struct spsc_queue *queue)  in spsc_queue_init() argument
     50  queue->head = NULL;  in spsc_queue_init()
     51  atomic_long_set(&queue->tail, (long)&queue->head);  in spsc_queue_init()
     52  atomic_set(&queue->job_count, 0);  in spsc_queue_init()
     55  static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)  in spsc_queue_peek() argument
     57  return queue->head;  in spsc_queue_peek()
     60  static inline int spsc_queue_count(struct spsc_queue *queue)  in spsc_queue_count() argument
     62  return atomic_read(&queue->job_count);  in spsc_queue_count()
     65  static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)  in spsc_queue_push() argument
     73  tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);  in spsc_queue_push()
    [all …]

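The DRM spsc_queue push is lock-free: the queue's tail is an atomic long that points at the last node's next field (initially at &queue->head), and pushing atomically exchanges it with the address of the new node's next field, then stores the node through the old tail. A simplified sketch of that push using C11 atomics, assuming a single producer; the consumer side and the kernel's explicit barriers are omitted.

#include <stdatomic.h>
#include <stddef.h>

struct node {
        struct node *_Atomic next;
};

struct spsc {
        struct node *_Atomic head;               /* first node, NULL when empty */
        _Atomic(struct node *_Atomic *) tail;    /* points at the last node's 'next' slot */
        atomic_int job_count;
};

static void spsc_init(struct spsc *q)
{
        atomic_store(&q->head, NULL);
        atomic_store(&q->tail, &q->head);        /* empty queue: tail points at the head slot */
        atomic_store(&q->job_count, 0);
}

/* Push: swing 'tail' to the new node's next slot, then link the old tail to the node. */
static void spsc_push(struct spsc *q, struct node *n)
{
        struct node *_Atomic *prev_tail;

        atomic_store(&n->next, NULL);
        atomic_fetch_add(&q->job_count, 1);
        prev_tail = atomic_exchange(&q->tail, &n->next);
        atomic_store(prev_tail, n);              /* makes the node visible to the consumer */
}
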
/Linux-v5.4/drivers/net/wireless/ath/ath5k/qcu.c
     63  ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)  in ath5k_hw_num_tx_pending() argument
     66  AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);  in ath5k_hw_num_tx_pending()
     69  if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)  in ath5k_hw_num_tx_pending()
     76  pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));  in ath5k_hw_num_tx_pending()
     82  if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))  in ath5k_hw_num_tx_pending()
     94  ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)  in ath5k_hw_release_tx_queue() argument
     96  if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))  in ath5k_hw_release_tx_queue()
    100  ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;  in ath5k_hw_release_tx_queue()
    102  AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);  in ath5k_hw_release_tx_queue()
    138  ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,  in ath5k_hw_get_tx_queueprops() argument
    [all …]