/Linux-v5.4/net/rose/
D | rose_in.c |
    104  int queued = 0;  in rose_state3_machine() local
    167  queued = 1;  in rose_state3_machine()
    204  return queued;  in rose_state3_machine()
    265  int queued = 0, frametype, ns, nr, q, d, m;  in rose_process_rx_frame() local
    274  queued = rose_state1_machine(sk, skb, frametype);  in rose_process_rx_frame()
    277  queued = rose_state2_machine(sk, skb, frametype);  in rose_process_rx_frame()
    280  queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m);  in rose_process_rx_frame()
    283  queued = rose_state4_machine(sk, skb, frametype);  in rose_process_rx_frame()
    286  queued = rose_state5_machine(sk, skb, frametype);  in rose_process_rx_frame()
    292  return queued;  in rose_process_rx_frame()
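Across the rose hits above and the x25, ax25 and netrom entries below, the same convention recurs: each per-state handler returns a queued flag so the dispatcher knows whether the handler took ownership of the skb (queued it somewhere) or whether the caller must still free it. A minimal userspace sketch of that shape; struct frame, rx_queue and the stateN_machine helpers are invented stand-ins, not the kernel types:

    #include <stdio.h>
    #include <stdlib.h>

    struct frame { int type; };

    static struct frame *rx_queue;    /* toy one-slot receive queue */

    /* Each state handler returns 1 if it queued (took ownership of)
     * the frame, 0 if the caller still owns it and must free it. */
    static int state1_machine(struct frame *f) { (void)f; return 0; }

    static int state3_machine(struct frame *f)
    {
        rx_queue = f;                 /* data accepted: ownership moves */
        return 1;
    }

    static int process_rx_frame(int state, struct frame *f)
    {
        int queued = 0;

        switch (state) {
        case 1: queued = state1_machine(f); break;
        case 3: queued = state3_machine(f); break;
        }
        return queued;                /* caller frees the frame if 0 */
    }

    int main(void)
    {
        struct frame *f = calloc(1, sizeof(*f));

        f->type = 3;
        if (!process_rx_frame(3, f))
            free(f);                  /* mirrors kfree_skb() in the callers */
        if (rx_queue) {               /* consumer side drains the queue */
            printf("dequeued a frame of type %d\n", rx_queue->type);
            free(rx_queue);
        }
        return 0;
    }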
/Linux-v5.4/net/x25/
D | x25_in.c |
    210  int queued = 0;  in x25_state3_machine() local
    277  queued = 1;  in x25_state3_machine()
    315  queued = !sock_queue_rcv_skb(sk, skb);  in x25_state3_machine()
    319  queued = 1;  in x25_state3_machine()
    330  return queued;  in x25_state3_machine()
    389  int queued = 0, frametype, ns, nr, q, d, m;  in x25_process_rx_frame() local
    398  queued = x25_state1_machine(sk, skb, frametype);  in x25_process_rx_frame()
    401  queued = x25_state2_machine(sk, skb, frametype);  in x25_process_rx_frame()
    404  queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);  in x25_process_rx_frame()
    407  queued = x25_state4_machine(sk, skb, frametype);  in x25_process_rx_frame()
    [all …]
D | x25_dev.c |
    51  int queued = 1;  in x25_receive_data() local
    56  queued = x25_process_rx_frame(sk, skb);  in x25_receive_data()
    58  queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));  in x25_receive_data()
    62  return queued;  in x25_receive_data()
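x25_receive_data() shows the companion idiom: if nobody holds the socket lock, process the frame inline; otherwise park it on the per-socket backlog, and since sk_add_backlog() returns 0 on success, queued is its negation. A hedged userspace analogue, with all names invented and a bounded array standing in for the sk_rcvbuf limit:

    #include <stdio.h>

    #define BACKLOG_MAX 4

    static int backlog[BACKLOG_MAX];   /* parked frames (just ids here) */
    static int backlog_len;

    /* Analogue of sk_add_backlog(): 0 on success, nonzero if full. */
    static int add_backlog(int frame_id)
    {
        if (backlog_len >= BACKLOG_MAX)
            return -1;                 /* would exceed the receive budget */
        backlog[backlog_len++] = frame_id;
        return 0;
    }

    static int process_rx_frame(int frame_id)
    {
        printf("processed frame %d inline\n", frame_id);
        return 1;                      /* frame consumed */
    }

    /* Process inline unless the "socket" is busy; otherwise park the
     * frame on the backlog for the lock holder to replay later. */
    static int receive_data(int frame_id, int sock_busy)
    {
        int queued = 1;

        if (!sock_busy)
            queued = process_rx_frame(frame_id);
        else
            queued = !add_backlog(frame_id);   /* note the negation */

        return queued;
    }

    int main(void)
    {
        receive_data(1, 0);   /* idle socket: handled inline */
        receive_data(2, 1);   /* busy socket: parked on backlog */
        printf("backlog holds %d frame(s)\n", backlog_len);
        return 0;
    }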
/Linux-v5.4/net/dccp/
D | input.c |
    45  int queued = 0;  in dccp_rcv_close() local
    76  queued = 1;  in dccp_rcv_close()
    86  return queued;  in dccp_rcv_close()
    91  int queued = 0;  in dccp_rcv_closereq() local
    101  return queued;  in dccp_rcv_closereq()
    113  queued = 1;  in dccp_rcv_closereq()
    120  return queued;  in dccp_rcv_closereq()
    524  int queued = 0;  in dccp_rcv_respond_partopen_state_process() local
    562  queued = 1; /* packet was queued  in dccp_rcv_respond_partopen_state_process()
    568  return queued;  in dccp_rcv_respond_partopen_state_process()
    [all …]
/Linux-v5.4/net/ax25/
D | ax25_std_in.c |
    143  int queued = 0;  in ax25_std_state3_machine() local
    225  queued = ax25_rx_iframe(ax25, skb);  in ax25_std_state3_machine()
    258  return queued;  in ax25_std_state3_machine()
    268  int queued = 0;  in ax25_std_state4_machine() local
    380  queued = ax25_rx_iframe(ax25, skb);  in ax25_std_state4_machine()
    413  return queued;  in ax25_std_state4_machine()
    421  int queued = 0, frametype, ns, nr, pf;  in ax25_std_frame_in() local
    427  queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type);  in ax25_std_frame_in()
    430  queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type);  in ax25_std_frame_in()
    433  queued = ax25_std_state3_machine(ax25, skb, frametype, ns, nr, pf, type);  in ax25_std_frame_in()
    [all …]
D | ax25_ds_in.c |
    147  int queued = 0;  in ax25_ds_state3_machine() local
    240  queued = ax25_rx_iframe(ax25, skb);  in ax25_ds_state3_machine()
    273  return queued;  in ax25_ds_state3_machine()
    281  int queued = 0, frametype, ns, nr, pf;  in ax25_ds_frame_in() local
    287  queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type);  in ax25_ds_frame_in()
    290  queued = ax25_ds_state2_machine(ax25, skb, frametype, pf, type);  in ax25_ds_frame_in()
    293  queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type);  in ax25_ds_frame_in()
    297  return queued;  in ax25_ds_frame_in()
D | ax25_in.c |
    103  int queued = 0;  in ax25_rx_iframe() local
    145  queued = 1;  in ax25_rx_iframe()
    151  return queued;  in ax25_rx_iframe()
    159  int queued = 0;  in ax25_process_rx_frame() local
    167  queued = ax25_std_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
    173  queued = ax25_ds_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
    175  queued = ax25_std_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
    180  return queued;  in ax25_process_rx_frame()
/Linux-v5.4/net/netrom/
D | nr_in.c |
    153  int queued = 0;  in nr_state3_machine() local
    226  queued = 1;  in nr_state3_machine()
    273  return queued;  in nr_state3_machine()
    280  int queued = 0, frametype;  in nr_process_rx_frame() local
    289  queued = nr_state1_machine(sk, skb, frametype);  in nr_process_rx_frame()
    292  queued = nr_state2_machine(sk, skb, frametype);  in nr_process_rx_frame()
    295  queued = nr_state3_machine(sk, skb, frametype);  in nr_process_rx_frame()
    301  return queued;  in nr_process_rx_frame()
/Linux-v5.4/drivers/gpu/drm/
D | drm_flip_work.c |
    63  list_add_tail(&task->node, &work->queued);  in drm_flip_work_queue_task()
    107  list_splice_tail(&work->queued, &work->commited);  in drm_flip_work_commit()
    108  INIT_LIST_HEAD(&work->queued);  in drm_flip_work_commit()
    151  INIT_LIST_HEAD(&work->queued);  in drm_flip_work_init()
    168  WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));  in drm_flip_work_cleanup()
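drm_flip_work keeps two lists: producers append tasks to queued, and commit splices the whole batch onto commited (that spelling is the actual field name in the kernel struct) in O(1), re-initialising queued so new tasks accumulate for the next commit while the worker drains a stable snapshot. A small userspace sketch of the splice, using a plain singly-linked list instead of the kernel's list_head:

    #include <stdio.h>
    #include <stdlib.h>

    struct task { int id; struct task *next; };

    /* Producers append here ... */
    static struct task *queued_head, **queued_tail = &queued_head;
    /* ... commit splices the batch here for the worker to drain. */
    static struct task *commited_head;

    static void queue_task(int id)
    {
        struct task *t = calloc(1, sizeof(*t));

        t->id = id;
        *queued_tail = t;              /* list_add_tail() analogue */
        queued_tail = &t->next;
    }

    /* Move the whole queued batch in O(1) and reset the queued list,
     * mirroring list_splice_tail() + INIT_LIST_HEAD() in the driver. */
    static void commit_tasks(void)
    {
        commited_head = queued_head;
        queued_head = NULL;
        queued_tail = &queued_head;
    }

    int main(void)
    {
        queue_task(1);
        queue_task(2);
        commit_tasks();
        queue_task(3);                 /* arrives after the snapshot */

        for (struct task *t = commited_head, *n; t; t = n) {
            n = t->next;
            printf("running task %d\n", t->id);
            free(t);
        }
        return 0;
    }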
/Linux-v5.4/drivers/md/
D | dm-cache-background-tracker.c |
    26  struct list_head queued;  member
    47  INIT_LIST_HEAD(&b->queued);  in btracker_create()
    205  list_add(&w->list, &b->queued);  in btracker_queue()
    219  if (list_empty(&b->queued))  in btracker_issue()
    222  w = list_first_entry(&b->queued, struct bt_work, list);  in btracker_issue()
/Linux-v5.4/virt/kvm/
D | async_pf.c |
    144  vcpu->async_pf.queued = 0;  in kvm_clear_async_pf_completion_queue()
    163  vcpu->async_pf.queued--;  in kvm_check_async_pf_completion()
    173  if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)  in kvm_setup_async_pf()
    205  vcpu->async_pf.queued++;  in kvm_setup_async_pf()
    233  vcpu->async_pf.queued++;  in kvm_async_pf_wakeup_all()
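In KVM's async_pf, queued is just a per-vCPU counter of outstanding async page faults: incremented at setup, decremented as completions are reaped, and capped at ASYNC_PF_PER_VCPU so work cannot pile up without bound. A toy sketch of the cap; ASYNC_WORK_MAX and the function names are invented stand-ins:

    #include <stdio.h>

    #define ASYNC_WORK_MAX 3   /* stand-in for ASYNC_PF_PER_VCPU */

    static int queued;         /* outstanding async work items */

    static int setup_async_work(int id)
    {
        if (queued >= ASYNC_WORK_MAX)
            return 0;          /* over the cap: fall back to sync handling */
        queued++;
        printf("work %d queued (%d outstanding)\n", id, queued);
        return 1;
    }

    static void check_completion(void)
    {
        if (queued > 0) {
            queued--;          /* one completion reaped */
            printf("completion reaped (%d outstanding)\n", queued);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++)
            if (!setup_async_work(i))
                printf("work %d handled synchronously\n", i);
        while (queued)
            check_completion();
        return 0;
    }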
/Linux-v5.4/fs/xfs/
D | xfs_mru_cache.c |
    102  unsigned int queued;  /* work has been queued */  member
    204  if (!mru->queued) {  in _xfs_mru_cache_list_insert()
    205  mru->queued = 1;  in _xfs_mru_cache_list_insert()
    280  mru->queued = next;  in _xfs_mru_cache_reap()
    281  if ((mru->queued > 0)) {  in _xfs_mru_cache_reap()
    388  if (mru->queued) {  in xfs_mru_cache_flush()
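Here queued is a schedule-once flag: the first insert arms the delayed reap work, later inserts see the flag and do nothing, and the reaper re-arms itself only while entries remain. A minimal sketch of the idiom, assuming a single-threaded toy model with invented names:

    #include <stdio.h>

    static unsigned int queued;   /* reap work already scheduled? */
    static int elements;          /* items waiting to be reaped */

    static void schedule_reap(void)
    {
        printf("reap work scheduled\n");   /* queue_delayed_work() analogue */
    }

    static void cache_insert(void)
    {
        elements++;
        if (!queued) {            /* arm the reaper only once */
            queued = 1;
            schedule_reap();
        }
    }

    /* The reaper re-arms itself while anything is left. */
    static void reap_work_fn(void)
    {
        elements--;
        queued = elements > 0;    /* mirror of mru->queued = next */
        if (queued)
            schedule_reap();
    }

    int main(void)
    {
        cache_insert();           /* schedules the reaper */
        cache_insert();           /* no second schedule */
        reap_work_fn();           /* one item left: re-arms */
        reap_work_fn();           /* empty: reaper goes idle */
        return 0;
    }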
/Linux-v5.4/Documentation/media/uapi/mediactl/
D | media-request-ioc-queue.rst |
    59  If the request was successfully queued, then the file descriptor can be
    62  If the request was already queued before, then ``EBUSY`` is returned.
    67  Once a request is queued, then the driver is required to gracefully handle
    74  queued directly and you next try to queue a request, or vice versa.
    87  The request was already queued or the application queued the first
D | request-api.rst |
    47  buffer queues since in practice only one buffer would be queued at a time.
    82  instead of being immediately applied, and buffers queued to a request do not
    83  enter the regular buffer queue until the request itself is queued.
    89  queued by calling :ref:`MEDIA_REQUEST_IOC_QUEUE` on the request file descriptor.
    91  A queued request cannot be modified anymore.
    109  a buffer was queued via a request or vice versa will result in an ``EBUSY``
    132  request that has been queued but not yet completed will return ``EBUSY``
    144  longer in use by the kernel. That is, if the request is queued and then the
    188  Once the request is fully prepared, it can be queued to the driver:
    268  Once the request is fully prepared, it can be queued to the driver:
D | media-request-ioc-reinit.rst |
    64  A request can only be re-initialized if it either has not been queued
    65  yet, or if it was queued and completed. Otherwise it will set ``errno``
    75  The request is queued but not yet completed.
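The three pages above describe the request lifecycle against the real media request API in <linux/media.h>: allocate a request on the media device, queue it on its own file descriptor, and re-initialize it only once it has completed. A hedged usage sketch; the /dev/media0 path and the elided preparation steps are assumptions:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/media.h>

    int main(void)
    {
        int media_fd = open("/dev/media0", O_RDWR); /* path is an assumption */
        int request_fd;

        if (media_fd < 0)
            return 1;

        /* Allocate a request; its fd is returned through the argument. */
        if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &request_fd) < 0)
            return 1;

        /* ... set controls / queue buffers against request_fd here ... */

        /* Queue the request; EBUSY means it was already queued. */
        if (ioctl(request_fd, MEDIA_REQUEST_IOC_QUEUE) < 0)
            fprintf(stderr, "queue: %s\n", strerror(errno));

        /* Wait for completion (the request fd becomes readable), then
         * re-initialize for reuse; EBUSY means queued but not done. */
        if (ioctl(request_fd, MEDIA_REQUEST_IOC_REINIT) < 0)
            fprintf(stderr, "reinit: %s\n", strerror(errno));

        close(request_fd);
        close(media_fd);
        return 0;
    }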
/Linux-v5.4/drivers/media/platform/vsp1/
D | vsp1_dl.c |
    224  struct vsp1_dl_list *queued;  member
    839  if (!dlm->queued)  in vsp1_dl_list_hw_update_pending()
    897  __vsp1_dl_list_put(dlm->queued);  in vsp1_dl_list_commit_continuous()
    898  dlm->queued = dl;  in vsp1_dl_list_commit_continuous()
    1018  if (dlm->queued) {  in vsp1_dlm_irq_frame_end()
    1019  if (dlm->queued->flags & VSP1_DL_FRAME_END_INTERNAL)  in vsp1_dlm_irq_frame_end()
    1021  dlm->queued->flags &= ~VSP1_DL_FRAME_END_INTERNAL;  in vsp1_dlm_irq_frame_end()
    1024  dlm->active = dlm->queued;  in vsp1_dlm_irq_frame_end()
    1025  dlm->queued = NULL;  in vsp1_dlm_irq_frame_end()
    1036  dlm->queued = dlm->pending;  in vsp1_dlm_irq_frame_end()
    [all …]
/Linux-v5.4/Documentation/features/locking/queued-rwlocks/
D | arch-support.txt |
    2  # Feature name: queued-rwlocks
    4  # description: arch supports queued rwlocks
/Linux-v5.4/Documentation/features/locking/queued-spinlocks/
D | arch-support.txt |
    2  # Feature name: queued-spinlocks
    4  # description: arch supports queued spinlocks
/Linux-v5.4/sound/firewire/fireworks/
D | fireworks_hwdep.c |
    127  bool queued;  in hwdep_read() local
    132  queued = efw->push_ptr != efw->pull_ptr;  in hwdep_read()
    134  while (!dev_lock_changed && !queued) {  in hwdep_read()
    143  queued = efw->push_ptr != efw->pull_ptr;  in hwdep_read()
    150  else if (queued)  in hwdep_read()
/Linux-v5.4/Documentation/usb/
D | ohci.rst |
    22  - interrupt transfers can be larger, and can be queued
    28  types can be queued. That was also true in "usb-ohci", except for interrupt
    30  to overhead in IRQ processing. When interrupt transfers are queued, those
/Linux-v5.4/Documentation/media/uapi/v4l/
D | vidioc-streamon.rst |
    58  If ``VIDIOC_STREAMON`` fails then any already queued buffers will remain
    59  queued.
    70  If buffers have been queued with :ref:`VIDIOC_QBUF` and
    72  ``VIDIOC_STREAMON``, then those queued buffers will also be removed from
    84  but ``VIDIOC_STREAMOFF`` will return queued buffers to their starting
/Linux-v5.4/drivers/net/wireless/mediatek/mt76/
D | dma.c |
    85  q->queued++;  in mt76_dma_add_buf()
    157  while ((q->queued > n_queued) && q->tail != last) {  in mt76_dma_tx_cleanup()
    180  q->queued -= n_queued;  in mt76_dma_tx_cleanup()
    192  qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;  in mt76_dma_tx_cleanup()
    196  if (!q->queued)  in mt76_dma_tx_cleanup()
    238  if (!q->queued)  in mt76_dma_dequeue()
    245  q->queued--;  in mt76_dma_dequeue()
    339  if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {  in mt76_dma_tx_queue_skb()
    372  while (q->queued < q->ndesc - 1) {  in mt76_dma_rx_fill()
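In mt76, queued counts descriptors currently in flight on a ring: enqueue increments it, cleanup decrements it, and submission is refused as it approaches ndesc (the driver keeps some slack, e.g. ndesc - 8 as the wake threshold). A toy ring with the same accounting; all names are invented:

    #include <stdio.h>

    #define NDESC 8

    struct ring {
        int head, tail;   /* producer / consumer indices */
        int queued;       /* descriptors currently in flight */
    };

    static int ring_add(struct ring *q, int id)
    {
        if (q->queued >= NDESC - 1)    /* keep one slot free, as mt76 does */
            return -1;
        printf("desc %d at slot %d\n", id, q->head);
        q->head = (q->head + 1) % NDESC;
        q->queued++;
        return 0;
    }

    static void ring_cleanup(struct ring *q)
    {
        while (q->queued > 0) {        /* reclaim completed descriptors */
            q->tail = (q->tail + 1) % NDESC;
            q->queued--;
        }
    }

    int main(void)
    {
        struct ring q = {0};
        int i, rejected = 0;

        for (i = 0; i < NDESC + 2; i++)
            if (ring_add(&q, i) < 0)
                rejected++;
        printf("%d queued, %d rejected\n", q.queued, rejected);
        ring_cleanup(&q);
        printf("%d queued after cleanup\n", q.queued);
        return 0;
    }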
D | usb.c |
    395  if (q->queued > 0) {  in mt76u_get_next_rx_entry()
    398  q->queued--;  in mt76u_get_next_rx_entry()
    518  q->queued++;  in mt76u_complete_rx()
    573  q->queued = 0;  in mt76u_submit_rx_buffers()
    664  while (q->queued > n_dequeued) {  in mt76u_tx_tasklet()
    684  q->queued -= n_dequeued;  in mt76u_tx_tasklet()
    686  wake = q->stopped && q->queued < q->ndesc - 8;  in mt76u_tx_tasklet()
    690  if (!q->queued)  in mt76u_tx_tasklet()
    776  if (q->queued == q->ndesc)  in mt76u_tx_queue_skb()
    794  q->queued++;  in mt76u_tx_queue_skb()
    [all …]
/Linux-v5.4/block/
D | blk-throttle.c |
    80  struct list_head queued[2];  /* throtl_qnode [READ/WRITE] */  member
    407  struct list_head *queued)  in throtl_qnode_add_bio() argument
    411  list_add_tail(&qn->node, queued);  in throtl_qnode_add_bio()
    420  static struct bio *throtl_peek_queued(struct list_head *queued)  in throtl_peek_queued() argument
    422  struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);  in throtl_peek_queued()
    425  if (list_empty(queued))  in throtl_peek_queued()
    447  static struct bio *throtl_pop_queued(struct list_head *queued,  in throtl_pop_queued()
    450  struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);  in throtl_pop_queued()
    453  if (list_empty(queued))  in throtl_pop_queued()
    466  list_move_tail(&qn->node, queued);  in throtl_pop_queued()
    [all …]
/Linux-v5.4/Documentation/core-api/
D | workqueue.rst |
    25  When a new work item gets queued, the worker begins executing again.
    81  off of the queue, one after the other. If no work is queued, the
    87  which manages worker-pools and processes the queued work items.
    91  worker-pools to serve work items queued on unbound workqueues - the
    102  When a work item is queued to a workqueue, the target worker-pool is
    106  be queued on the worklist of either normal or highpri worker-pool that
    143  on code paths that handle memory reclaim are required to be queued on
    168  Work items queued to an unbound wq are served by the special
    195  Work items of a highpri wq are queued to the highpri
    247  achieve this behavior. Work items on such wq were always queued to the
    [all …]