/Linux-v5.10/include/drm/gpu_scheduler.h
     55  * @rq: runqueue on which this entity is currently scheduled.
     57  * Jobs from this entity can be scheduled on any scheduler
     67  * The &drm_sched_fence.scheduled uses the
     75  * @last_scheduled: points to the finished fence of the last scheduled job.
    107  * struct drm_sched_rq - queue of entities to be scheduled.
    111  * @entities: list of the entities to be scheduled.
    112  * @current_entity: the entity which is to be scheduled.
    130  * @scheduled: this fence is what will be signaled by the scheduler
    131  * when the job is scheduled.
    133  struct dma_fence scheduled;    member
  [all …]

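The gpu_scheduler.h hits above describe the two fences a scheduler job carries: one signaled when the job is picked to run, one when it completes, plus the entity's pointer to the last scheduled job's finished fence. A minimal sketch of that shape, using hypothetical *_sketch names; the real structs in include/drm/gpu_scheduler.h carry far more state than the fields quoted above:

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

/* Sketch only -- trimmed to the fields the search hits above mention. */
struct drm_sched_fence_sketch {
        struct dma_fence scheduled;     /* signaled when the job is scheduled */
        struct dma_fence finished;      /* signaled when the job has completed */
};

struct drm_sched_entity_sketch {
        struct drm_sched_rq *rq;                /* runqueue currently scheduled on */
        struct dma_fence *last_scheduled;       /* finished fence of the last scheduled job */
};
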
/Linux-v5.10/drivers/gpu/drm/scheduler/sched_fence.c
     53  int ret = dma_fence_signal(&fence->scheduled);    in drm_sched_fence_scheduled()
     56  DMA_FENCE_TRACE(&fence->scheduled,    in drm_sched_fence_scheduled()
     59  DMA_FENCE_TRACE(&fence->scheduled,    in drm_sched_fence_scheduled()
    122  * Drop the extra reference from the scheduled fence to the base fence.
    128  dma_fence_put(&fence->scheduled);    in drm_sched_fence_release_finished()
    146  return container_of(f, struct drm_sched_fence, scheduled);    in to_drm_sched_fence()
    170  dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,    in drm_sched_fence_create()

/Linux-v5.10/drivers/gpu/drm/scheduler/sched_entity.c
    369  * Fence is a scheduled/finished fence from a job    in drm_sched_entity_add_dependency_cb()
    382  * it to be scheduled    in drm_sched_entity_add_dependency_cb()
    384  fence = dma_fence_get(&s_fence->scheduled);    in drm_sched_entity_add_dependency_cb()
    391  /* Ignore it when it is already scheduled */    in drm_sched_entity_add_dependency_cb()
    405  * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity

/Linux-v5.10/drivers/gpu/drm/scheduler/sched_main.c
     28  * into software queues which are then scheduled on a hardware run queue.
     41  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
     44  * The jobs in an entity are always scheduled in the order that they were pushed.
    327  * jobs from it will not be scheduled further
    348  if (bad->s_fence->scheduled.context ==    in drm_sched_increase_karma()
    513  guilty_context = s_job->s_fence->scheduled.context;    in drm_sched_resubmit_jobs()
    516  if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)    in drm_sched_resubmit_jobs()

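The sched_main.c lines summarize the flow: drivers push jobs into per-entity software queues, and the scheduler moves them, in push order, onto a hardware run queue. A hypothetical driver-side submit sketch against the v5.10 entry points drm_sched_job_init() and drm_sched_entity_push_job(); the my_job wrapper and my_submit() helper are assumptions for illustration, and driver-specific setup and locking are omitted:

#include <linux/err.h>
#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

/* Hypothetical driver job embedding a drm_sched_job. */
struct my_job {
        struct drm_sched_job base;
        /* driver-specific command stream state would live here */
};

static struct dma_fence *my_submit(struct my_job *job,
                                   struct drm_sched_entity *entity,
                                   void *owner)
{
        struct dma_fence *finished;
        int ret;

        /* Attach the job to an entity; this also creates the
         * scheduled/finished fence pair (struct drm_sched_fence). */
        ret = drm_sched_job_init(&job->base, entity, owner);
        if (ret)
                return ERR_PTR(ret);

        /* Grab the finished fence before pushing: jobs in an entity run
         * in push order, and this fence signals when ours completes. */
        finished = dma_fence_get(&job->base.s_fence->finished);

        drm_sched_entity_push_job(&job->base, entity);
        return finished;
}

The returned finished fence is what other entities would wait on; as the sched_entity.c hits above show, a dependency on the same scheduler can be relaxed to the scheduled fence, since the job only needs to be scheduled, not finished, before the dependent job may run.
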
/Linux-v5.10/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h
     17  * brcmf_pno_start_sched_scan - initiate scheduled scan on device.
     20  * @req: configuration parameters for scheduled scan.
     26  * brcmf_pno_stop_sched_scan - terminate scheduled scan on device.
     34  * brcmf_pno_wiphy_params - fill scheduled scan parameters in wiphy instance.

/Linux-v5.10/Documentation/powerpc/pmu-ebb.rst
     44  user process. This means once an EBB event is scheduled on the PMU, no non-EBB
     56  first will be scheduled and the other will be put in error state. See the
     84  userspace is able to reliably determine which PMC the event is scheduled on.
     95  guarantee that it has been scheduled on the PMU. To ensure that the EBB event
     96  has been scheduled on the PMU, you must perform a read() on the event. If the
     97  read() returns EOF, then the event has not been scheduled and EBBs are not

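The pmu-ebb.rst lines spell out the userspace check: after setting up an EBB event, a read() on its fd is what confirms it was scheduled on the PMU, and an EOF (zero-byte) read means it was not. A small userspace sketch of just that check; the fd is assumed to come from an earlier perf_event_open() call configuring an EBB event, which is not shown:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

/* Returns 1 if the EBB event is scheduled on the PMU, 0 if not, -1 on error. */
static int ebb_event_is_scheduled(int ebb_event_fd)
{
        uint64_t count;
        ssize_t n = read(ebb_event_fd, &count, sizeof(count));

        if (n == 0)
                return 0;       /* EOF: not scheduled, EBBs are not active */
        if (n < 0) {
                perror("read");
                return -1;
        }
        return 1;               /* a successful read guarantees it is on the PMU */
}
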
/Linux-v5.10/drivers/gpu/drm/drm_vblank_work.c
     99  * If @work is already scheduled, this function will reschedule said work
    103  * %1 if @work was successfully (re)scheduled, %0 if it was either already
    104  * scheduled or cancelled, or a negative error code on failure.
    131  /* Already scheduled w/ same vbl count */    in drm_vblank_work_schedule()
    172  * Cancel an already scheduled vblank work and wait for its
    175  * On return, @work is guaranteed to no longer be scheduled or running, even
    212  * drm_vblank_work_flush - wait for a scheduled vblank work to finish

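The drm_vblank_work.c comments document three outcomes for drm_vblank_work_schedule(): 1 when the work was newly (re)scheduled, 0 when it was already scheduled for that vblank count or has been cancelled, and a negative errno on failure. A hypothetical caller sketch that maps those cases, assuming the v5.10 signature with a nextonmiss flag; the work item and target count are assumed to be set up elsewhere:

#include <drm/drm_vblank_work.h>

static int my_queue_vblank_work(struct drm_vblank_work *work, u64 count)
{
        int ret = drm_vblank_work_schedule(work, count, true /* nextonmiss */);

        if (ret < 0)
                return ret;     /* failure, e.g. vblanks unusable on this CRTC */
        if (ret == 0)
                return 0;       /* already scheduled for this count, or cancelled */

        return 1;               /* successfully (re)scheduled */
}
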
/Linux-v5.10/net/sctp/stream_sched_prio.c
     51  /* Look into scheduled priorities first, as they are sorted and    in sctp_sched_prio_get_head()
     52  * we can find it fast IF it's scheduled.    in sctp_sched_prio_get_head()
     92  bool scheduled = false;    in sctp_sched_prio_unsched()    local
     97  /* Scheduled */    in sctp_sched_prio_unsched()
     98  scheduled = true;    in sctp_sched_prio_unsched()
    114  return scheduled;    in sctp_sched_prio_unsched()
    124  /* Nothing to do if already scheduled */    in sctp_sched_prio_sched()

/Linux-v5.10/drivers/usb/host/xhci-mtk.h
     54  * (@repeat==1) scheduled within the interval
     63  * scheduled first time within the interval
     65  * scheduled within an interval. in the simple algorithm, only
     69  * @pkts: number of packets to be transferred in the scheduled uframes

/Linux-v5.10/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
    108  * the first fragment is scheduled.
    110  * the first 2 fragments are scheduled.
    116  * scheduled.
    174  * the first fragment is scheduled.
    176  * the first 2 fragments are scheduled.
    182  * scheduled.
    311  * @status: true if scheduled, false otherwise (not executed)
    444  * Note: the session protection will always be scheduled to start as

/Linux-v5.10/include/linux/posix-timers.h
    131  * @work: The task work to be scheduled
    132  * @scheduled: @work has been scheduled already, no further processing
    136  unsigned int scheduled;    member

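The posix-timers.h hit is a small struct whose @scheduled flag records that @work has already been queued, so expiry handling is not queued twice. A hedged reconstruction from the two kernel-doc lines above; the struct name and the callback_head type of @work are not shown in the excerpt and are assumptions:

#include <linux/types.h>

/* Reconstruction for illustration only; field types are an assumption
 * except where the search hit shows them verbatim. */
struct posix_cputimers_work {
        struct callback_head    work;           /* the task work to be scheduled */
        unsigned int            scheduled;      /* set once @work has been scheduled */
};
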
/Linux-v5.10/drivers/soc/fsl/dpio/qbman-portal.h
    391  * qbman_swp_fq_schedule() - Move the fq to the scheduled state
    393  * @fqid: the index of frame queue to be scheduled
    406  * qbman_swp_fq_force() - Force the FQ to fully scheduled state
    410  * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
    443  * XOFF FQs will remain in the tentatively-scheduled state, even when
    444  * non-empty, meaning they won't be selected for scheduled dequeuing.
    445  * If a FQ is changed to XOFF after it had already become truly-scheduled

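The qbman-portal.h comments describe the frame-queue state machine: qbman_swp_fq_schedule() moves an FQ into the scheduled state, and qbman_swp_fq_force() pushes a tentatively-scheduled FQ to fully-scheduled so it becomes eligible for scheduled dequeuing. A hedged caller sketch of that sequence; the my_fq_make_schedulable() helper is hypothetical, the portal and FQ id are assumed to be set up elsewhere, and the exact return conventions are not shown in the excerpt:

#include "qbman-portal.h"

static int my_fq_make_schedulable(struct qbman_swp *swp, u32 fqid)
{
        int ret;

        ret = qbman_swp_fq_schedule(swp, fqid); /* move the FQ to the scheduled state */
        if (ret)
                return ret;

        return qbman_swp_fq_force(swp, fqid);   /* force fully-scheduled if only tentative */
}
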
/Linux-v5.10/drivers/gpu/drm/i915/i915_priolist_types.h
     23  /* Interactive workload, scheduled for immediate pageflipping */
     38  * another context. They get scheduled with their default priority and

/Linux-v5.10/arch/alpha/kernel/perf_event.c
     36  /* Number of events scheduled; also number entries valid in arrays below. */
     40  /* Events currently scheduled. */
     42  /* Event type of each scheduled event. */
     44  /* Current index of each scheduled event; if not yet determined
    149  * Check that a group of events can be simultaneously scheduled on to the
    369  * Check that a group of events can be simultaneously scheduled on to the PMU.
    387  * If new events have been scheduled then update cpuc with the new
    637  * scheduled on to the PMU. At that point the code to programme the    in __hw_perf_event_init()
    646  * be scheduled on to the PMU.    in __hw_perf_event_init()
    731  /* Update cpuc with information from any new scheduled events. */    in alpha_pmu_enable()

/Linux-v5.10/arch/s390/pci/pci_irq.c
    147  atomic_t scheduled;    member
    153  atomic_t *scheduled = data;    in zpci_handle_remote_irq()    local
    157  } while (atomic_dec_return(scheduled));    in zpci_handle_remote_irq()
    179  if (atomic_inc_return(&cpu_data->scheduled) > 1)    in zpci_handle_fallback_irq()
    183  cpu_data->csd.info = &cpu_data->scheduled;    in zpci_handle_fallback_irq()

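The pci_irq.c fragments show a "pending counter" pattern: the fallback IRQ path bumps an atomic counter and only kicks the remote CPU (via a call_single_data item) on the 0 -> 1 transition, while the remote handler loops until atomic_dec_return() reaches zero, so interrupts raised in the meantime are batched rather than re-queued. A generic, hypothetical sketch of that pattern; the names are illustrative and this is not the s390 code itself:

#include <linux/atomic.h>
#include <linux/smp.h>

/* Illustrative per-CPU state, not the real s390 structure. */
struct my_cpu_irq_data {
        atomic_t scheduled;             /* IRQs pending for the target CPU */
        call_single_data_t csd;
};

static void my_remote_handler(void *data)
{
        atomic_t *scheduled = data;

        do {
                /* process one batch of pending work here */
        } while (atomic_dec_return(scheduled)); /* loop until nothing is pending */
}

static void my_kick_remote_cpu(struct my_cpu_irq_data *cpu_data, int cpu)
{
        /* Only the 0 -> 1 transition sends the IPI; later increments are
         * absorbed by the loop in my_remote_handler(). */
        if (atomic_inc_return(&cpu_data->scheduled) > 1)
                return;

        cpu_data->csd.func = my_remote_handler;
        cpu_data->csd.info = &cpu_data->scheduled;
        smp_call_function_single_async(cpu, &cpu_data->csd);
}
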
/Linux-v5.10/kernel/workqueue_internal.h
     34  struct list_head scheduled;    /* L: scheduled works */    member

/Linux-v5.10/fs/btrfs/reada.c
     61  int scheduled;    member
    110  re->scheduled = 0;    in __readahead_hook()
    721  if (re->scheduled || list_empty(&re->extctl)) {    in reada_start_machine_dev()
    726  re->scheduled = 1;    in reada_start_machine_dev()
    886  pr_debug(" re: logical %llu size %u empty %d scheduled %d",    in dump_devs()
    888  list_empty(&re->extctl), re->scheduled);    in dump_devs()
    915  if (!re->scheduled) {    in dump_devs()
    919  pr_debug("re: logical %llu size %u list empty %d scheduled %d",    in dump_devs()
    921  list_empty(&re->extctl), re->scheduled);    in dump_devs()

/Linux-v5.10/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
     24  - Ethernet Enhanced Scheduled Traffic Operations (CPTS_ESTFn) (TSN)
     29  Scheduled Traffic Operations (CPTS_ESTFn) and PCIe Subsystem Precision Time

/Linux-v5.10/Documentation/vm/active_mm.rst
     54  anonymous process gets scheduled away, the borrowed address space is
     66  gets scheduled away in favour of a real thread, the "zombie" mm gets

/Linux-v5.10/drivers/usb/dwc2/hcd_queue.c
    152  * @interval: How often we need to be scheduled for the reservation this
    651  * already scheduled something on the low speed schedule and
    961  /* If we scheduled all w/out breaking out then we're all good */    in dwc2_uframe_schedule_split()
   1072  * dwc2_pick_first_frame() - Choose 1st frame for qh that's already scheduled
   1074  * Takes a qh that has already been scheduled (which means we know we have the
   1104  * NOTE: if we could quantify how long till we actually get scheduled    in dwc2_pick_first_frame()
   1123  * the first frame should be based on when the first scheduled    in dwc2_pick_first_frame()
   1162  * scheduled for.    in dwc2_pick_first_frame()
   1289  * Wait for the lock, or for us to be scheduled again. We    in dwc2_unreserve_timer_fn()
   1290  * could be scheduled again if:    in dwc2_unreserve_timer_fn()
  [all …]

/Linux-v5.10/include/xen/interface/vcpu.h
     99  /* VCPU is runnable, but not currently scheduled on any physical CPU. */
    120  * updated by the hypervisor each time the VCPU is scheduled. Thus
    123  * VCPU was last scheduled to run.

/Linux-v5.10/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
    253  /* Signal all jobs not yet scheduled */    in amdgpu_job_stop_all_jobs_on_sched()
    265  dma_fence_signal(&s_fence->scheduled);    in amdgpu_job_stop_all_jobs_on_sched()
    273  /* Signal all jobs already scheduled to HW */    in amdgpu_job_stop_all_jobs_on_sched()

/Linux-v5.10/include/uapi/linux/sched/types.h
     95  * A task with a min utilization value bigger than 0 is more likely scheduled
     98  * scheduled on a CPU with no more capacity than the specified value.

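The types.h comment describes the utilization clamps in struct sched_attr: a non-zero minimum makes the task more likely to be scheduled on a CPU with at least that much capacity, and the maximum bounds it the other way. A hedged userspace sketch that sets both clamps on the calling task via sched_setattr(); it assumes v5.10-era UAPI headers exporting struct sched_attr and the SCHED_FLAG_UTIL_CLAMP_* flags, and uses a raw syscall because glibc has no wrapper:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/sched.h>          /* SCHED_NORMAL, SCHED_FLAG_UTIL_CLAMP_* */
#include <linux/sched/types.h>    /* struct sched_attr with sched_util_{min,max} */

int main(void)
{
        struct sched_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.sched_policy = SCHED_NORMAL;
        attr.sched_flags = SCHED_FLAG_UTIL_CLAMP_MIN | SCHED_FLAG_UTIL_CLAMP_MAX;
        attr.sched_util_min = 256;      /* out of a 1024 capacity scale */
        attr.sched_util_max = 1024;

        if (syscall(SYS_sched_setattr, 0 /* self */, &attr, 0) < 0) {
                perror("sched_setattr");
                return 1;
        }
        return 0;
}
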
/Linux-v5.10/drivers/net/ethernet/ti/Kconfig
    118  protocol, Ethernet Enhanced Scheduled Traffic Operations (CPTS_ESTFn)
    126  AM65 CPSW hardware supports Enhanced Scheduled Traffic (EST)

/Linux-v5.10/tools/lib/traceevent/Documentation/libtraceevent-event_print.txt
     42  may need to be scheduled in.
     55  On preemptible kernels (where the task can be scheduled