/Linux-v4.19/net/rose/rose_in.c
   107  int queued = 0;  in rose_state3_machine() [local]
   170  queued = 1;  in rose_state3_machine()
   207  return queued;  in rose_state3_machine()
   268  int queued = 0, frametype, ns, nr, q, d, m;  in rose_process_rx_frame() [local]
   277  queued = rose_state1_machine(sk, skb, frametype);  in rose_process_rx_frame()
   280  queued = rose_state2_machine(sk, skb, frametype);  in rose_process_rx_frame()
   283  queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m);  in rose_process_rx_frame()
   286  queued = rose_state4_machine(sk, skb, frametype);  in rose_process_rx_frame()
   289  queued = rose_state5_machine(sk, skb, frametype);  in rose_process_rx_frame()
   295  return queued;  in rose_process_rx_frame()

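Every hit in this file follows one convention, which recurs in the X.25, AX.25, NET/ROM, DCCP and DECnet entries below: a per-state handler returns `queued` = 1 when it has taken ownership of the skb by queueing it, and the dispatcher in rose_process_rx_frame() frees the buffer only when no handler consumed it. A minimal userspace sketch of that contract (the two-state machine, the frame type and the one-slot receive queue are illustrative stand-ins, not kernel code):

    #include <stdio.h>
    #include <stdlib.h>

    struct skb { const char *data; };            /* stand-in for struct sk_buff */
    enum conn_state { STATE_1, STATE_3 };        /* two states are enough here */

    static struct skb *rx_queue;                 /* one-slot socket receive queue */

    /* Handlers return 1 when they queued (took ownership of) the skb,
     * 0 when the caller still owns it. */
    static int state1_machine(struct skb *skb, int frametype)
    {
        return 0;                                /* call not up: nothing queued */
    }

    static int state3_machine(struct skb *skb, int frametype)
    {
        int queued = 0;

        if (frametype == 'I') {                  /* data frame: hand it over */
            rx_queue = skb;                      /* sock_queue_rcv_skb() in the kernel */
            queued = 1;
        }
        return queued;
    }

    /* Shape of rose_process_rx_frame(): dispatch on connection state,
     * free the skb only when no handler consumed it. */
    static int process_rx_frame(enum conn_state state, struct skb *skb, int frametype)
    {
        int queued = 0;

        switch (state) {
        case STATE_1: queued = state1_machine(skb, frametype); break;
        case STATE_3: queued = state3_machine(skb, frametype); break;
        }
        if (!queued)
            free(skb);                           /* kfree_skb() in the kernel */
        return queued;
    }

    int main(void)
    {
        struct skb *skb = malloc(sizeof(*skb));

        skb->data = "payload";
        process_rx_frame(STATE_3, skb, 'I');
        if (rx_queue) {                          /* the reader drains the queue */
            printf("received: %s\n", rx_queue->data);
            free(rx_queue);
        }
        return 0;
    }
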
/Linux-v4.19/net/x25/x25_in.c
   206  int queued = 0;  in x25_state3_machine() [local]
   273  queued = 1;  in x25_state3_machine()
   311  queued = !sock_queue_rcv_skb(sk, skb);  in x25_state3_machine()
   315  queued = 1;  in x25_state3_machine()
   326  return queued;  in x25_state3_machine()
   385  int queued = 0, frametype, ns, nr, q, d, m;  in x25_process_rx_frame() [local]
   394  queued = x25_state1_machine(sk, skb, frametype);  in x25_process_rx_frame()
   397  queued = x25_state2_machine(sk, skb, frametype);  in x25_process_rx_frame()
   400  queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);  in x25_process_rx_frame()
   403  queued = x25_state4_machine(sk, skb, frametype);  in x25_process_rx_frame()
   [all …]

/Linux-v4.19/net/x25/x25_dev.c
    56  int queued = 1;  in x25_receive_data() [local]
    61  queued = x25_process_rx_frame(sk, skb);  in x25_receive_data()
    63  queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);  in x25_receive_data()
    67  return queued;  in x25_receive_data()

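x25_receive_data() adds a wrinkle at the socket boundary: when the socket is locked by a user context the frame cannot be run through the state machines, so it goes to the socket backlog instead, and `queued` records whether that worked (`sk_add_backlog()` returns 0 on success, hence the negation). A hedged sketch of the lock-or-backlog split, with the lock and backlog reduced to plain fields:

    #include <stdbool.h>
    #include <stdio.h>

    struct sock {
        bool owned_by_user;                  /* sock_owned_by_user(sk) stand-in */
        int backlog_len, backlog_max;
    };

    /* Same convention as sk_add_backlog(): 0 on success, nonzero when
     * the backlog is full. */
    static int add_backlog(struct sock *sk)
    {
        if (sk->backlog_len >= sk->backlog_max)
            return -1;
        sk->backlog_len++;
        return 0;
    }

    static int process_rx_frame(struct sock *sk)
    {
        return 1;                            /* assume the state machine queues it */
    }

    /* Shape of x25_receive_data(): run the frame directly when the
     * socket is free, otherwise defer it to the backlog. */
    static int receive_data(struct sock *sk)
    {
        int queued = 1;

        if (!sk->owned_by_user)
            queued = process_rx_frame(sk);
        else
            queued = !add_backlog(sk);
        return queued;
    }

    int main(void)
    {
        struct sock sk = { .owned_by_user = true, .backlog_max = 1 };

        printf("queued=%d\n", receive_data(&sk));    /* 1: went to the backlog */
        printf("queued=%d\n", receive_data(&sk));    /* 0: backlog full */
        return 0;
    }
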
/Linux-v4.19/net/dccp/input.c
    49  int queued = 0;  in dccp_rcv_close() [local]
    80  queued = 1;  in dccp_rcv_close()
    90  return queued;  in dccp_rcv_close()
    95  int queued = 0;  in dccp_rcv_closereq() [local]
   105  return queued;  in dccp_rcv_closereq()
   117  queued = 1;  in dccp_rcv_closereq()
   124  return queued;  in dccp_rcv_closereq()
   528  int queued = 0;  in dccp_rcv_respond_partopen_state_process() [local]
   566  queued = 1; /* packet was queued  in dccp_rcv_respond_partopen_state_process()
   572  return queued;  in dccp_rcv_respond_partopen_state_process()
   [all …]

/Linux-v4.19/net/ax25/ax25_std_in.c
   146  int queued = 0;  in ax25_std_state3_machine() [local]
   228  queued = ax25_rx_iframe(ax25, skb);  in ax25_std_state3_machine()
   261  return queued;  in ax25_std_state3_machine()
   271  int queued = 0;  in ax25_std_state4_machine() [local]
   383  queued = ax25_rx_iframe(ax25, skb);  in ax25_std_state4_machine()
   416  return queued;  in ax25_std_state4_machine()
   424  int queued = 0, frametype, ns, nr, pf;  in ax25_std_frame_in() [local]
   430  queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type);  in ax25_std_frame_in()
   433  queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type);  in ax25_std_frame_in()
   436  queued = ax25_std_state3_machine(ax25, skb, frametype, ns, nr, pf, type);  in ax25_std_frame_in()
   [all …]

/Linux-v4.19/net/ax25/ax25_ds_in.c
   150  int queued = 0;  in ax25_ds_state3_machine() [local]
   243  queued = ax25_rx_iframe(ax25, skb);  in ax25_ds_state3_machine()
   276  return queued;  in ax25_ds_state3_machine()
   284  int queued = 0, frametype, ns, nr, pf;  in ax25_ds_frame_in() [local]
   290  queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type);  in ax25_ds_frame_in()
   293  queued = ax25_ds_state2_machine(ax25, skb, frametype, pf, type);  in ax25_ds_frame_in()
   296  queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type);  in ax25_ds_frame_in()
   300  return queued;  in ax25_ds_frame_in()

/Linux-v4.19/net/ax25/ax25_in.c
   106  int queued = 0;  in ax25_rx_iframe() [local]
   148  queued = 1;  in ax25_rx_iframe()
   154  return queued;  in ax25_rx_iframe()
   162  int queued = 0;  in ax25_process_rx_frame() [local]
   170  queued = ax25_std_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
   176  queued = ax25_ds_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
   178  queued = ax25_std_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
   183  return queued;  in ax25_process_rx_frame()

/Linux-v4.19/net/netrom/nr_in.c
   156  int queued = 0;  in nr_state3_machine() [local]
   229  queued = 1;  in nr_state3_machine()
   276  return queued;  in nr_state3_machine()
   283  int queued = 0, frametype;  in nr_process_rx_frame() [local]
   292  queued = nr_state1_machine(sk, skb, frametype);  in nr_process_rx_frame()
   295  queued = nr_state2_machine(sk, skb, frametype);  in nr_process_rx_frame()
   298  queued = nr_state3_machine(sk, skb, frametype);  in nr_process_rx_frame()
   304  return queued;  in nr_process_rx_frame()

/Linux-v4.19/drivers/gpu/drm/drm_flip_work.c
    60  list_add_tail(&task->node, &work->queued);  in drm_flip_work_queue_task()
   104  list_splice_tail(&work->queued, &work->commited);  in drm_flip_work_commit()
   105  INIT_LIST_HEAD(&work->queued);  in drm_flip_work_commit()
   148  INIT_LIST_HEAD(&work->queued);  in drm_flip_work_init()
   165  WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));  in drm_flip_work_cleanup()

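Here `queued` is a list head rather than a flag: drm_flip_work_queue_task() accumulates tasks on it, and drm_flip_work_commit() splices the whole batch onto the `commited` list (the kernel really spells the field that way) for the worker to run. A sketch of the splice-at-commit idea, with bounded arrays standing in for `list_head` splicing:

    #include <stdio.h>
    #include <string.h>

    #define MAX_TASKS 8

    /* Tasks pile up in 'queued' and move wholesale to 'commited' at
     * commit time. */
    struct flip_work {
        int queued[MAX_TASKS], n_queued;
        int commited[MAX_TASKS], n_commited;
    };

    static void queue_task(struct flip_work *w, int task)
    {
        w->queued[w->n_queued++] = task;     /* list_add_tail(..., &work->queued) */
    }

    /* drm_flip_work_commit() shape: splice queued onto commited and
     * reset queued to empty. */
    static void commit(struct flip_work *w)
    {
        memcpy(w->commited + w->n_commited, w->queued,
               w->n_queued * sizeof(w->queued[0]));
        w->n_commited += w->n_queued;
        w->n_queued = 0;                     /* INIT_LIST_HEAD(&work->queued) */
    }

    int main(void)
    {
        struct flip_work w = { 0 };

        queue_task(&w, 1);
        queue_task(&w, 2);
        commit(&w);                          /* both tasks are now runnable */
        for (int i = 0; i < w.n_commited; i++)
            printf("run task %d\n", w.commited[i]);
        return 0;
    }
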
/Linux-v4.19/virt/kvm/async_pf.c
   156  vcpu->async_pf.queued = 0;  in kvm_clear_async_pf_completion_queue()
   175  vcpu->async_pf.queued--;  in kvm_check_async_pf_completion()
   185  if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)  in kvm_setup_async_pf()
   217  vcpu->async_pf.queued++;  in kvm_setup_async_pf()
   245  vcpu->async_pf.queued++;  in kvm_async_pf_wakeup_all()

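KVM's async page-fault code uses `queued` as a per-vCPU count of outstanding requests, capped at ASYNC_PF_PER_VCPU before new work is accepted. The bounded-outstanding-work pattern in miniature (the cap value here is illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define ASYNC_PF_PER_VCPU 64             /* cap on outstanding requests */

    struct vcpu { int queued; };

    /* kvm_setup_async_pf() shape: refuse new work at the cap. */
    static bool setup_async_pf(struct vcpu *v)
    {
        if (v->queued >= ASYNC_PF_PER_VCPU)
            return false;
        v->queued++;
        return true;
    }

    /* kvm_check_async_pf_completion() shape: consume one completion. */
    static void check_completion(struct vcpu *v)
    {
        if (v->queued > 0)
            v->queued--;
    }

    int main(void)
    {
        struct vcpu v = { 0 };

        while (setup_async_pf(&v))
            ;                                /* fill to the cap */
        printf("outstanding: %d\n", v.queued);
        check_completion(&v);
        printf("after one completion: %d\n", v.queued);
        printf("can queue again: %d\n", setup_async_pf(&v));
        return 0;
    }
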
/Linux-v4.19/drivers/md/dm-cache-background-tracker.c
    26  struct list_head queued;  [member]
    47  INIT_LIST_HEAD(&b->queued);  in btracker_create()
   205  list_add(&w->list, &b->queued);  in btracker_queue()
   219  if (list_empty(&b->queued))  in btracker_issue()
   222  w = list_first_entry(&b->queued, struct bt_work, list);  in btracker_issue()

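The background tracker declares `queued` as a `struct list_head` member: btracker_queue() links new work onto it and btracker_issue() hands out the first entry, reporting nothing-to-do when the list is empty. A sketch with a singly linked list in place of the kernel's `list_head` machinery:

    #include <stdio.h>

    struct work {
        int id;
        struct work *next;
    };

    /* 'queued' list member; NULL plays the role of an empty list_head. */
    struct tracker { struct work *queued; };

    static void tracker_queue(struct tracker *t, struct work *w)
    {
        w->next = t->queued;                 /* list_add(): insert at the head */
        t->queued = w;
    }

    /* btracker_issue() shape: nothing to hand out when the list is
     * empty, otherwise detach and return the first entry. */
    static struct work *tracker_issue(struct tracker *t)
    {
        struct work *w = t->queued;

        if (!w)
            return NULL;                     /* list_empty(&b->queued) */
        t->queued = w->next;
        return w;
    }

    int main(void)
    {
        struct tracker t = { 0 };
        struct work a = { .id = 1 }, b = { .id = 2 };

        tracker_queue(&t, &a);
        tracker_queue(&t, &b);
        for (struct work *w; (w = tracker_issue(&t)); )
            printf("issue work %d\n", w->id);    /* 2 then 1: head insertion */
        return 0;
    }
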
/Linux-v4.19/fs/xfs/xfs_mru_cache.c
   102  unsigned int queued; /* work has been queued */  [member]
   204  if (!mru->queued) {  in _xfs_mru_cache_list_insert()
   205  mru->queued = 1;  in _xfs_mru_cache_list_insert()
   280  mru->queued = next;  in _xfs_mru_cache_reap()
   281  if ((mru->queued > 0)) {  in _xfs_mru_cache_reap()
   388  if (mru->queued) {  in xfs_mru_cache_flush()

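In the XFS MRU cache `queued` is a flag, not a count: it records that the reap work has already been scheduled, so _xfs_mru_cache_list_insert() queues the work only once, and xfs_mru_cache_flush() checks it to know whether there is anything to wait for. A sketch of the queue-once flag (the reaper's re-arm decision is simplified, and scheduling is faked with a printf):

    #include <stdio.h>

    struct mru_cache {
        unsigned int queued;                 /* work has been queued */
        int items;
    };

    static void schedule_reap(struct mru_cache *mru)
    {
        (void)mru;
        printf("reap work scheduled\n");     /* queue_delayed_work() in the kernel */
    }

    /* Insert path: only the first insert schedules the reaper. */
    static void mru_insert(struct mru_cache *mru)
    {
        mru->items++;
        if (!mru->queued) {
            mru->queued = 1;
            schedule_reap(mru);
        }
    }

    /* Reap path, simplified: expire everything, then re-arm only if
     * something is still pending. */
    static void mru_reap(struct mru_cache *mru)
    {
        mru->items = 0;
        mru->queued = mru->items;
        if (mru->queued > 0)
            schedule_reap(mru);
    }

    int main(void)
    {
        struct mru_cache mru = { 0 };

        mru_insert(&mru);                    /* schedules the reaper */
        mru_insert(&mru);                    /* does not: already queued */
        mru_reap(&mru);
        printf("queued after reap: %u\n", mru.queued);
        return 0;
    }
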
/Linux-v4.19/drivers/media/platform/vsp1/vsp1_dl.c
   224  struct vsp1_dl_list *queued;  [member]
   826  if (!dlm->queued)  in vsp1_dl_list_hw_update_pending()
   882  __vsp1_dl_list_put(dlm->queued);  in vsp1_dl_list_commit_continuous()
   883  dlm->queued = dl;  in vsp1_dl_list_commit_continuous()
   988  if (dlm->queued) {  in vsp1_dlm_irq_frame_end()
   989  if (dlm->queued->internal)  in vsp1_dlm_irq_frame_end()
   991  dlm->queued->internal = false;  in vsp1_dlm_irq_frame_end()
   994  dlm->active = dlm->queued;  in vsp1_dlm_irq_frame_end()
   995  dlm->queued = NULL;  in vsp1_dlm_irq_frame_end()
  1006  dlm->queued = dlm->pending;  in vsp1_dlm_irq_frame_end()
   [all …]

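The VSP1 display-list manager keeps three list pointers: `active` (in the hardware), `queued` (armed for the next frame) and `pending`. Committing replaces any superseded queued list, and the frame-end interrupt rotates queued into active and pending into queued. A sketch of that rotation, with strings standing in for `struct vsp1_dl_list` and the `internal` bookkeeping omitted:

    #include <stdio.h>

    /* 'active' is in the hardware, 'queued' is armed for the next
     * frame, 'pending' waits its turn. */
    struct dl_manager {
        const char *active, *queued, *pending;
    };

    /* Commit shape: a newly committed list supersedes any queued one. */
    static void commit(struct dl_manager *dlm, const char *dl)
    {
        if (dlm->queued)
            printf("releasing superseded list %s\n", dlm->queued);
        dlm->queued = dl;                    /* __vsp1_dl_list_put(dlm->queued); ... */
    }

    /* Frame-end IRQ shape: queued -> active, pending -> queued. */
    static void irq_frame_end(struct dl_manager *dlm)
    {
        if (dlm->queued) {
            dlm->active = dlm->queued;
            dlm->queued = NULL;
        }
        if (dlm->pending) {
            dlm->queued = dlm->pending;
            dlm->pending = NULL;
        }
    }

    int main(void)
    {
        struct dl_manager dlm = { 0 };

        commit(&dlm, "frame-1");
        irq_frame_end(&dlm);                 /* frame-1 is now active */
        commit(&dlm, "frame-2");
        irq_frame_end(&dlm);
        printf("active=%s queued=%s\n",
               dlm.active, dlm.queued ? dlm.queued : "(none)");
        return 0;
    }
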
/Linux-v4.19/Documentation/features/locking/queued-rwlocks/arch-support.txt
     2  # Feature name: queued-rwlocks
     4  # description: arch supports queued rwlocks

/Linux-v4.19/Documentation/features/locking/queued-spinlocks/arch-support.txt
     2  # Feature name: queued-spinlocks
     4  # description: arch supports queued spinlocks

/Linux-v4.19/Documentation/media/uapi/v4l/vidioc-streamon.rst
    51  If ``VIDIOC_STREAMON`` fails then any already queued buffers will remain
    52  queued.
    63  If buffers have been queued with :ref:`VIDIOC_QBUF` and
    65  ``VIDIOC_STREAMON``, then those queued buffers will also be removed from
    77  but ``VIDIOC_STREAMOFF`` will return queued buffers to their starting

/Linux-v4.19/sound/firewire/fireworks/fireworks_hwdep.c
   128  bool queued;  in hwdep_read() [local]
   133  queued = efw->push_ptr != efw->pull_ptr;  in hwdep_read()
   135  while (!dev_lock_changed && !queued) {  in hwdep_read()
   144  queued = efw->push_ptr != efw->pull_ptr;  in hwdep_read()
   151  else if (queued)  in hwdep_read()

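hwdep_read() derives `queued` from ring-buffer indices: data is pending exactly when the push and pull pointers differ, and the read loop sleeps until that becomes true or the device-lock state changes. The pointer-comparison idiom, single-threaded for brevity (the sleep is replaced by simply producing data):

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 8

    /* Data is pending exactly when the producer (push) and consumer
     * (pull) indices differ. */
    struct ring {
        int buf[RING_SIZE];
        unsigned int push_ptr, pull_ptr;
    };

    static bool ring_queued(const struct ring *r)
    {
        return r->push_ptr != r->pull_ptr;   /* efw->push_ptr != efw->pull_ptr */
    }

    static void ring_push(struct ring *r, int v)
    {
        r->buf[r->push_ptr] = v;
        r->push_ptr = (r->push_ptr + 1) % RING_SIZE;
    }

    static int ring_pull(struct ring *r)
    {
        int v = r->buf[r->pull_ptr];

        r->pull_ptr = (r->pull_ptr + 1) % RING_SIZE;
        return v;
    }

    int main(void)
    {
        struct ring r = { { 0 }, 0, 0 };

        printf("queued before push: %d\n", ring_queued(&r));  /* 0: reader sleeps */
        ring_push(&r, 42);
        if (ring_queued(&r))                 /* reader wakes up and reads */
            printf("read %d\n", ring_pull(&r));
        return 0;
    }
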
/Linux-v4.19/drivers/net/wireless/mediatek/mt76/dma.c
    90  q->queued++;  in mt76_dma_add_buf()
   155  while (q->queued && q->tail != last) {  in mt76_dma_tx_cleanup()
   169  q->queued--;  in mt76_dma_tx_cleanup()
   180  wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;  in mt76_dma_tx_cleanup()
   182  if (!q->queued)  in mt76_dma_tx_cleanup()
   224  if (!q->queued)  in mt76_dma_dequeue()
   231  q->queued--;  in mt76_dma_dequeue()
   296  if (q->queued + (n + 1) / 2 >= q->ndesc - 1)  in mt76_dma_tx_queue_skb()
   334  while (q->queued < q->ndesc - 1) {  in mt76_dma_rx_fill()

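In mt76 (this file and usb.c below), `queued` counts occupied descriptors in a hardware ring: incremented per buffer, decremented on completion, checked against `ndesc` headroom before accepting new frames, and compared to a low-water mark of `ndesc - 8` to decide when a stopped queue may be woken. The accounting in a sketch (thresholds copied from the hits; the descriptors themselves are elided):

    #include <stdbool.h>
    #include <stdio.h>

    /* 'queued' counts filled descriptors out of 'ndesc' ring slots. */
    struct queue {
        int queued, ndesc;
    };

    /* Fill path: always keep one slot free, as the driver does. */
    static bool add_buf(struct queue *q)
    {
        if (q->queued >= q->ndesc - 1)
            return false;
        q->queued++;
        return true;
    }

    /* Completion path: release one descriptor and report whether a
     * stopped queue may be woken (low-water mark of ndesc - 8). */
    static bool tx_cleanup_one(struct queue *q)
    {
        if (q->queued)
            q->queued--;
        return q->queued < q->ndesc - 8;
    }

    int main(void)
    {
        struct queue q = { .queued = 0, .ndesc = 16 };

        while (add_buf(&q))
            ;                                /* fill the ring: 15 of 16 slots */
        printf("queued=%d of ndesc=%d\n", q.queued, q.ndesc);
        printf("wake after one cleanup: %d\n", tx_cleanup_one(&q)); /* 0: 14 >= 8 */
        return 0;
    }
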
/Linux-v4.19/drivers/net/wireless/mediatek/mt76/usb.c
   311  if (q->queued > 0) {  in mt76u_get_next_rx_entry()
   314  q->queued--;  in mt76u_get_next_rx_entry()
   402  q->queued++;  in mt76u_complete_rx()
   454  q->queued = 0;  in mt76u_submit_rx_buffers()
   560  if (!buf->done || !q->queued)  in mt76u_tx_tasklet()
   573  q->queued--;  in mt76u_tx_tasklet()
   576  wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;  in mt76u_tx_tasklet()
   577  if (!q->queued)  in mt76u_tx_tasklet()
   661  if (q->queued == q->ndesc)  in mt76u_tx_queue_skb()
   681  q->queued++;  in mt76u_tx_queue_skb()

/Linux-v4.19/Documentation/usb/ohci.txt
    18  - interrupt transfers can be larger, and can be queued
    24  types can be queued. That was also true in "usb-ohci", except for interrupt
    26  to overhead in IRQ processing. When interrupt transfers are queued, those

/Linux-v4.19/block/blk-flush.c
   171  bool queued = false, kicked;  in blk_flush_complete_seq() [local]
   194  queued = blk_flush_queue_rq(rq, true);  in blk_flush_complete_seq()
   218  return kicked | queued;  in blk_flush_complete_seq()
   225  bool queued = false;  in flush_end_io() [local]
   259  queued |= blk_flush_complete_seq(rq, fq, seq, error);  in flush_end_io()
   273  if (queued || fq->flush_queue_delayed) {  in flush_end_io()

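blk-flush uses `queued` as a boolean accumulator: flush_end_io() ORs together, over every request waiting on the flush, whether advancing its sequence step (re)issued a request, and only then kicks the queue. The accumulate-then-kick shape (the per-request decision is faked for the sketch):

    #include <stdbool.h>
    #include <stdio.h>

    /* Advancing one request's flush sequence may or may not (re)issue
     * a request; pretend even-numbered requests do. */
    static bool complete_seq(int rq)
    {
        bool queued = (rq % 2 == 0);

        if (queued)
            printf("request %d issued for its next step\n", rq);
        return queued;
    }

    /* flush_end_io() shape: OR the per-request results together and
     * kick the queue only if something was actually queued. */
    static void flush_end_io(const int *rqs, int n)
    {
        bool queued = false;

        for (int i = 0; i < n; i++)
            queued |= complete_seq(rqs[i]);
        if (queued)
            printf("kick the request queue\n");
    }

    int main(void)
    {
        int rqs[] = { 1, 2, 3 };

        flush_end_io(rqs, 3);
        return 0;
    }
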
/Linux-v4.19/block/blk-throttle.c
    80  struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */  [member]
   408  struct list_head *queued)  in throtl_qnode_add_bio() [argument]
   412  list_add_tail(&qn->node, queued);  in throtl_qnode_add_bio()
   421  static struct bio *throtl_peek_queued(struct list_head *queued)  in throtl_peek_queued() [argument]
   423  struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);  in throtl_peek_queued()
   426  if (list_empty(queued))  in throtl_peek_queued()
   448  static struct bio *throtl_pop_queued(struct list_head *queued,  in throtl_pop_queued() [argument]
   451  struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);  in throtl_pop_queued()
   454  if (list_empty(queued))  in throtl_pop_queued()
   467  list_move_tail(&qn->node, queued);  in throtl_pop_queued()
   [all …]

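`queued[2]` holds one list per direction (READ/WRITE) of throtl_qnodes, each grouping bios from one source. throtl_peek_queued() looks at the head qnode's bio without removing it; throtl_pop_queued() takes a bio and moves its qnode to the tail, so sources are drained round-robin. A sketch of pop-with-rotation, with an array standing in for the qnode list and bios reduced to a counter:

    #include <stdio.h>

    #define MAX_QN 4

    /* A qnode groups bios from one source; 'bios' counts what it holds. */
    struct qnode {
        const char *name;
        int bios;
    };

    /* One direction's worth of the 'queued[2]' member. */
    struct tg {
        struct qnode *queued[MAX_QN];
        int n;
    };

    /* throtl_pop_queued() shape: take one bio from the head qnode,
     * then rotate that qnode to the tail if it still has bios. */
    static const char *pop_queued(struct tg *tg)
    {
        struct qnode *qn;
        int i;

        if (!tg->n)
            return NULL;                     /* list_empty(queued) */
        qn = tg->queued[0];
        qn->bios--;
        for (i = 0; i < tg->n - 1; i++)      /* detach the head qnode */
            tg->queued[i] = tg->queued[i + 1];
        tg->n--;
        if (qn->bios > 0)
            tg->queued[tg->n++] = qn;        /* list_move_tail(&qn->node, queued) */
        return qn->name;
    }

    int main(void)
    {
        struct qnode a = { "A", 2 }, b = { "B", 2 };
        struct tg tg = { { &a, &b }, 2 };
        const char *src;

        while ((src = pop_queued(&tg)))
            printf("dispatch bio from %s\n", src);   /* A B A B: round-robin */
        return 0;
    }
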
/Linux-v4.19/drivers/crypto/inside-secure/safexcel_hash.c
   213  u64 queued, len, cache_len;  in safexcel_ahash_send_req() [local]
   215  queued = len = safexcel_queued_len(req);  in safexcel_ahash_send_req()
   216  if (queued <= crypto_ahash_blocksize(ahash))  in safexcel_ahash_send_req()
   217  cache_len = queued;  in safexcel_ahash_send_req()
   219  cache_len = queued - areq->nbytes;  in safexcel_ahash_send_req()
   225  extra = queued & (crypto_ahash_blocksize(ahash) - 1);  in safexcel_ahash_send_req()
   237  queued -= extra;  in safexcel_ahash_send_req()
   240  if (!queued) {  in safexcel_ahash_send_req()
   266  queued -= cache_len;  in safexcel_ahash_send_req()
   267  if (!queued)  in safexcel_ahash_send_req()
   [all …]

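Here `queued` is a byte count: how much input still awaits hashing. Bytes that do not fill a whole block (`queued & (blocksize - 1)`) are held back in a cache for the next update so the engine only sees complete blocks, and the send path bails out when nothing would be left to process. The modular arithmetic in isolation (block size 64, as for SHA-1/SHA-256; the cache itself is reduced to a printf):

    #include <stdio.h>

    #define BLOCKSIZE 64                     /* SHA-1/SHA-256 block size */

    /* safexcel_ahash_send_req() shape: decide how many bytes go to the
     * engine now and how many trailing bytes stay cached. */
    static void split_update(unsigned int queued)
    {
        unsigned int extra = queued & (BLOCKSIZE - 1);   /* partial block */

        if (queued <= BLOCKSIZE) {
            printf("%u bytes: cache everything, send nothing\n", queued);
            return;
        }
        queued -= extra;                     /* engine sees whole blocks only */
        printf("send %u bytes, cache %u for the next update\n", queued, extra);
    }

    int main(void)
    {
        split_update(40);                    /* below one block: all cached */
        split_update(200);                   /* send 192, cache 8 */
        return 0;
    }
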
/Linux-v4.19/Documentation/core-api/workqueue.rst
    25  When a new work item gets queued, the worker begins executing again.
    81  off of the queue, one after the other. If no work is queued, the
    87  which manages worker-pools and processes the queued work items.
    91  worker-pools to serve work items queued on unbound workqueues - the
   102  When a work item is queued to a workqueue, the target worker-pool is
   106  be queued on the worklist of either normal or highpri worker-pool that
   143  on code paths that handle memory reclaim are required to be queued on
   168  Work items queued to an unbound wq are served by the special
   195  Work items of a highpri wq are queued to the highpri
   247  achieve this behavior. Work items on such wq were always queued to the
   [all …]

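These lines are the workqueue document's prose description of queueing; the minimal module-side usage they describe looks like the following hedged sketch (standard `alloc_workqueue`/`INIT_WORK`/`queue_work` calls; the workqueue name and flags are illustrative choices):

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;
    static struct work_struct example_work;

    static void example_work_fn(struct work_struct *work)
    {
        pr_info("work item executed by a worker\n");
    }

    static int __init example_init(void)
    {
        /* WQ_UNBOUND: served by the unbound worker-pools the text describes */
        example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
        if (!example_wq)
            return -ENOMEM;
        INIT_WORK(&example_work, example_work_fn);
        queue_work(example_wq, &example_work);
        return 0;
    }

    static void __exit example_exit(void)
    {
        flush_work(&example_work);           /* wait for the item to finish */
        destroy_workqueue(example_wq);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
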
/Linux-v4.19/net/decnet/dn_nsp_in.c
   617  int queued = 0;  in dn_nsp_otherdata() [local]
   630  queued = 1;  in dn_nsp_otherdata()
   636  if (!queued)  in dn_nsp_otherdata()
   642  int queued = 0;  in dn_nsp_data() [local]
   656  queued = 1;  in dn_nsp_data()
   667  if (!queued)  in dn_nsp_data()