/Linux-v4.19/net/netfilter/ipvs/ |
D | ip_vs_sched.c |
      46  struct ip_vs_scheduler *scheduler) in ip_vs_bind_scheduler() argument
      50  if (scheduler->init_service) { in ip_vs_bind_scheduler()
      51  ret = scheduler->init_service(svc); in ip_vs_bind_scheduler()
      57  rcu_assign_pointer(svc->scheduler, scheduler); in ip_vs_bind_scheduler()
      70  cur_sched = rcu_dereference_protected(svc->scheduler, 1); in ip_vs_unbind_scheduler()
     138  void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler) in ip_vs_scheduler_put() argument
     140  if (scheduler) in ip_vs_scheduler_put()
     141  module_put(scheduler->module); in ip_vs_scheduler_put()
     150  struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler); in ip_vs_scheduler_err()
     172  int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) in register_ip_vs_scheduler() argument
     [all …]
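
Note: these hits show the RCU pointer-publish pattern. ip_vs_bind_scheduler() installs the scheduler with rcu_assign_pointer(), and readers fetch it under rcu_read_lock() via rcu_dereference(). A minimal kernel-style sketch of the same pattern, using hypothetical my_service/my_sched types rather than the real ipvs structures:

    #include <linux/rcupdate.h>

    /* Hypothetical stand-ins for the ipvs structures. */
    struct my_sched {
        int (*schedule)(void);
    };

    struct my_service {
        struct my_sched __rcu *scheduler;
    };

    static void bind_sched(struct my_service *svc, struct my_sched *sched)
    {
        /* Publish: prior initialization of *sched is ordered before
         * the pointer becomes visible to RCU readers. */
        rcu_assign_pointer(svc->scheduler, sched);
    }

    static int run_sched(struct my_service *svc)
    {
        struct my_sched *s;
        int ret = -1;

        rcu_read_lock();
        s = rcu_dereference(svc->scheduler);  /* valid inside the read side */
        if (s)
            ret = s->schedule();
        rcu_read_unlock();
        return ret;
    }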
|
/Linux-v4.19/drivers/gpu/drm/i915/gvt/ |
D | sched_policy.c |
     134  struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; in try_to_schedule_next_vgpu() local
     143  if (scheduler->next_vgpu == scheduler->current_vgpu) { in try_to_schedule_next_vgpu()
     144  scheduler->next_vgpu = NULL; in try_to_schedule_next_vgpu()
     152  scheduler->need_reschedule = true; in try_to_schedule_next_vgpu()
     156  if (scheduler->current_workload[i]) in try_to_schedule_next_vgpu()
     161  vgpu_update_timeslice(scheduler->current_vgpu, cur_time); in try_to_schedule_next_vgpu()
     162  vgpu_data = scheduler->next_vgpu->sched_data; in try_to_schedule_next_vgpu()
     166  scheduler->current_vgpu = scheduler->next_vgpu; in try_to_schedule_next_vgpu()
     167  scheduler->next_vgpu = NULL; in try_to_schedule_next_vgpu()
     169  scheduler->need_reschedule = false; in try_to_schedule_next_vgpu()
     [all …]
|
D | scheduler.c |
     226  struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; in shadow_context_status_change() local
     232  spin_lock_irqsave(&scheduler->mmio_context_lock, flags); in shadow_context_status_change()
     234  scheduler->engine_owner[ring_id]) { in shadow_context_status_change()
     236  intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], in shadow_context_status_change()
     238  scheduler->engine_owner[ring_id] = NULL; in shadow_context_status_change()
     240  spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags); in shadow_context_status_change()
     245  workload = scheduler->current_workload[ring_id]; in shadow_context_status_change()
     251  spin_lock_irqsave(&scheduler->mmio_context_lock, flags); in shadow_context_status_change()
     252  if (workload->vgpu != scheduler->engine_owner[ring_id]) { in shadow_context_status_change()
     254  intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], in shadow_context_status_change()
     [all …]
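
Note: the mmio_context_lock hits illustrate the irq-safe spinlock pattern; the engine-owner state is also touched from a context-switch notifier, so interrupts must be disabled while the lock is held. A small sketch with hypothetical names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(owner_lock);   /* plays the role of mmio_context_lock */
    static void *engine_owner;            /* hypothetical shared state */

    static void take_ownership(void *new_owner)
    {
        unsigned long flags;

        /* Disable local interrupts so code firing in IRQ context
         * cannot deadlock against us on the same lock. */
        spin_lock_irqsave(&owner_lock, flags);
        engine_owner = new_owner;
        spin_unlock_irqrestore(&owner_lock, flags);
    }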
|
D | Makefile |
       5  execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
|
D | debugfs.c |
      99  spin_lock_bh(&gvt->scheduler.mmio_context_lock); in vgpu_mmio_diff_show()
     106  spin_unlock_bh(&gvt->scheduler.mmio_context_lock); in vgpu_mmio_diff_show()
|
/Linux-v4.19/block/ |
D | Kconfig.iosched |
      10  The no-op I/O scheduler is a minimal scheduler that does basic merging
      17  tristate "Deadline I/O scheduler"
      20  The deadline I/O scheduler is simple and compact. It will provide
      26  tristate "CFQ I/O scheduler"
      29  The CFQ I/O scheduler tries to distribute bandwidth equally
      34  This is the default I/O scheduler.
      45  prompt "Default I/O scheduler"
      48  Select the I/O scheduler which will be used by default for all
      69  tristate "MQ deadline I/O scheduler"
      72  MQ version of the deadline IO scheduler.
      [all …]
|
/Linux-v4.19/Documentation/block/ |
D | switching-sched.txt |
       5  Each io queue has a set of io scheduler tunables associated with it. These
       6  tunables control how the io scheduler works. You can find these entries
      17  IO scheduler for a given block device on the fly (thus making it possible,
      18  for instance, to set the CFQ scheduler for the system default, but
      22  To set a specific scheduler, simply do this:
      24  echo SCHEDNAME > /sys/block/DEV/queue/scheduler
      26  where SCHEDNAME is the name of a defined IO scheduler, and DEV is the
      30  a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
      31  will be displayed, with the currently selected scheduler in brackets:
      33  # cat /sys/block/hda/queue/scheduler
      [all …]
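
Note: the same switch can be driven from a program by writing the scheduler name to the queue/scheduler attribute. A small userspace sketch (set_iosched() is a hypothetical helper; the device and scheduler names are just examples):

    #include <stdio.h>

    /* Write a scheduler name to /sys/block/<dev>/queue/scheduler.
     * Returns 0 on success; the kernel rejects unknown names. */
    static int set_iosched(const char *dev, const char *sched)
    {
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/block/%s/queue/scheduler", dev);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fputs(sched, f);
        return fclose(f) ? -1 : 0;   /* write errors surface on close */
    }

Calling set_iosched("sda", "deadline") then has the same effect as the echo shown in the hits above.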
|
D | deadline-iosched.txt |
       1  Deadline IO scheduler tunables
       4  This little file attempts to document how the deadline io scheduler works.
      11  selecting an io scheduler on a per-device basis.
      20  The goal of the deadline io scheduler is to attempt to guarantee a start
      22  tunable. When a read request first enters the io scheduler, it is assigned
      50  When we have to move requests from the io scheduler queue to the block
      61  Sometimes it happens that a request enters the io scheduler that is contiguous
      70  rbtree front sector lookup when the io scheduler merge function is called.
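
Note: these lines describe the core deadline mechanism. A read entering the scheduler is stamped with a service deadline derived from the read_expire tunable (500 ms by default), and dispatch falls back from sector order to FIFO order once the request at the FIFO head has expired. A simplified sketch with invented types:

    #include <stdbool.h>
    #include <stdint.h>

    #define READ_EXPIRE_MS 500          /* default read_expire */

    struct request {
        uint64_t deadline_ms;           /* when service becomes "due" */
    };

    static void stamp_read(struct request *rq, uint64_t now_ms)
    {
        rq->deadline_ms = now_ms + READ_EXPIRE_MS;
    }

    static bool must_serve_fifo(const struct request *fifo_head, uint64_t now_ms)
    {
        /* Expired head: abandon sector-sorted order to bound latency. */
        return now_ms >= fifo_head->deadline_ms;
    }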
|
D | 00-INDEX |
       4  - BFQ IO scheduler and its tunables
      12  - CFQ IO scheduler tunables
      18  - Deadline IO scheduler tunables
      20  - Block io priorities (in CFQ scheduler)
|
D | kyber-iosched.txt |
       1  Kyber I/O scheduler tunables
       4  The only two tunables for the Kyber scheduler are the target latencies for
|
D | queue-sysfs.txt |
     156  scheduler (RW)
     159  for this block device. The currently active IO scheduler will be enclosed
     160  in [] brackets. Writing an IO scheduler name to this file will switch
     161  control of this block device to that new IO scheduler. Note that writing
     162  an IO scheduler name to this file will attempt to load that IO scheduler
|
D | request.txt |
      23  I I/O scheduler member
      38  void *elevator_private I I/O scheduler private data
|
D | biodoc.txt |
      49  - I/O scheduler modularization
      70  4. The I/O scheduler
     116  Various parameters that the generic i/o scheduler logic uses are set at
     223  iii. The i/o scheduler algorithm itself can be replaced/set as appropriate
     225  As in 2.4, it is possible to plugin a brand new i/o scheduler for a particular
     228  of the i/o scheduler. There are more pluggable callbacks, e.g for init,
     232  the i/o scheduler from block drivers.
     234  I/O scheduler wrappers are to be used instead of accessing the queue directly.
     235  See section 4. The I/O scheduler for details.
     373  on to the generic block layer, only to be merged by the i/o scheduler
     [all …]
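
Note: the "plugin a brand new i/o scheduler" lines correspond to the elevator registration API. A hedged module outline showing only the registration step; the scheduler callbacks a real elevator must supply are elided, and the exact struct elevator_type layout varies between kernel versions:

    #include <linux/module.h>
    #include <linux/elevator.h>

    static struct elevator_type example_sched = {
        .elevator_name  = "example",
        .elevator_owner = THIS_MODULE,
        /* .ops = { ... init/merge/dispatch callbacks ... }, */
    };

    static int __init example_sched_init(void)
    {
        /* Once registered, the name shows up in queue/scheduler. */
        return elv_register(&example_sched);
    }

    static void __exit example_sched_exit(void)
    {
        elv_unregister(&example_sched);
    }

    module_init(example_sched_init);
    module_exit(example_sched_exit);
    MODULE_LICENSE("GPL");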
|
/Linux-v4.19/Documentation/scheduler/ |
D | sched-design-CFS.txt |
       9  scheduler implemented by Ingo Molnar and merged in Linux 2.6.23. It is the
      10  replacement for the previous vanilla scheduler's SCHED_OTHER interactivity
      56  previous vanilla scheduler and RSDL/SD are affected).
      76  schedules (or a scheduler tick happens) the task's CPU usage is "accounted
      89  other HZ detail. Thus the CFS scheduler has no notion of "timeslices" in the
      90  way the previous scheduler had, and has no heuristics whatsoever. There is
      95  which can be used to tune the scheduler from "desktop" (i.e., low latencies) to
      97  for desktop workloads. SCHED_BATCH is handled by the CFS scheduler module too.
      99  Due to its design, the CFS scheduler is not prone to any of the "attacks" that
     100  exist today against the heuristics of the stock scheduler: fiftyp.c, thud.c,
     [all …]
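
Note: the "accounted" and "no timeslices" lines refer to CFS's per-task virtual clock. Runtime is charged scaled by task weight, and the runnable task with the smallest virtual runtime runs next. An illustrative sketch (constants and types invented, not the actual kernel/sched/fair.c code):

    #include <stdint.h>

    #define NICE_0_WEIGHT 1024          /* illustrative unit weight */

    struct task {
        uint64_t vruntime_ns;           /* weighted CPU time consumed */
        uint32_t weight;                /* higher for lower nice values */
    };

    static void account(struct task *t, uint64_t delta_exec_ns)
    {
        /* A heavier task's virtual clock advances more slowly, so it
         * is picked more often and receives a larger CPU share. */
        t->vruntime_ns += delta_exec_ns * NICE_0_WEIGHT / t->weight;
    }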
|
D | sched-nice-design.txt |
       2  nice-levels implementation in the new Linux scheduler.
       8  scheduler, (otherwise we'd have done it long ago) because nice level
      12  In the O(1) scheduler (in 2003) we changed negative nice levels to be
      73  With the old scheduler, if you for example started a niced task with +1
      84  The new scheduler in v2.6.23 addresses all three types of complaints:
      87  enough), the scheduler was decoupled from 'time slice' and HZ concepts
      90  support: with the new scheduler nice +19 tasks get a HZ-independent
      92  scheduler.
      95  the new scheduler makes nice(1) have the same CPU utilization effect on
      97  scheduler, running a nice +10 and a nice 11 task has the same CPU
      [all …]
|
D | 00-INDEX |
      12  - How and why the scheduler's nice levels are implemented.
|
D | sched-arch.txt |
      13  To request the scheduler call switch_to with the runqueue unlocked,
      18  penalty to the core scheduler implementation in the CONFIG_SMP case.
|
/Linux-v4.19/sound/pci/mixart/ |
D | mixart_core.h |
     230  u64 scheduler; member
     243  u64 scheduler; member
     252  u64 scheduler; member
     393  u64 scheduler; member
     444  u64 scheduler; member
     504  u64 scheduler; member
     549  u64 scheduler; member
|
/Linux-v4.19/Documentation/virtual/kvm/ |
D | halt-polling.txt |
       9  before giving up the cpu to the scheduler in order to let something else run.
      12  very quickly by at least saving us a trip through the scheduler, normally on
      15  interval or some other task on the runqueue is runnable the scheduler is
      18  savings of not invoking the scheduler are distinguishable.
      31  The maximum time for which to poll before invoking the scheduler, referred to
      72  whether the scheduler is invoked within that function).
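
Note: these hits describe the trade-off behind halt polling: briefly poll for a wakeup before paying for a trip through the scheduler. A self-contained userspace sketch of the basic poll-then-block decision (the 200 us budget and all names are illustrative; KVM additionally grows and shrinks the interval adaptively):

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    static uint64_t halt_poll_ns = 200000;   /* illustrative poll budget */
    static volatile bool wakeup_pending;     /* would be set by an event */

    static uint64_t now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000u + ts.tv_nsec;
    }

    /* Returns true if polling caught a wakeup (scheduler trip saved). */
    static bool vcpu_halt_poll(void)
    {
        uint64_t start = now_ns();

        while (now_ns() - start < halt_poll_ns)
            if (wakeup_pending)
                return true;
        return false;                        /* give up and really block */
    }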
|
/Linux-v4.19/Documentation/devicetree/bindings/usb/ |
D | da8xx-usb.txt |
      35  - reg-names: "controller", "scheduler", "queuemgr"
      74  reg-names = "controller", "scheduler", "queuemgr";
|
/Linux-v4.19/Documentation/cgroup-v1/ |
D | cpusets.txt |
      57  CPUs or Memory Nodes not in that cpuset. The scheduler will not
     103  kernel to avoid any additional impact on the critical scheduler or
     278  the system load imposed by a batch scheduler monitoring this
     283  counter, a batch scheduler can detect memory pressure with a
     288  the batch scheduler can obtain the key information, memory
     376  The kernel scheduler (kernel/sched/core.c) automatically load balances
     384  linearly with the number of CPUs being balanced. So the scheduler
     416  scheduler will avoid load balancing across the CPUs in that cpuset,
     421  enabled, then the scheduler will have one sched domain covering all
     434  scheduler might not consider the possibility of load balancing that
     [all …]
|
/Linux-v4.19/Documentation/ABI/testing/ |
D | sysfs-cfq-target-latency |
       6  when the user sets cfq to /sys/block/<device>/scheduler.
|
/Linux-v4.19/net/sched/ |
D | Kconfig |
      15  If you say N here, you will get the standard packet scheduler, which
      57  CBQ is a commonly used scheduler, so if you're unsure, you should
      91  Say Y here if you want to use the ATM pseudo-scheduler. This
     105  scheduler.
     113  Say Y here if you want to use an n-band queue packet scheduler
     232  tristate "Deficit Round Robin scheduler (DRR)"
     243  tristate "Multi-queue priority scheduler (MQPRIO)"
     245  Say Y here if you want to use the Multi-queue Priority scheduler.
     246  This scheduler allows QOS to be offloaded on NICs that have support
     255  tristate "SKB priority queue scheduler (SKBPRIO)"
     [all …]
|
/Linux-v4.19/Documentation/admin-guide/pm/ |
D | cpufreq.rst |
     154  all of the online CPUs belonging to the given policy with the CPU scheduler.
     155  The utilization update callbacks will be invoked by the CPU scheduler on
     157  scheduler tick or generally whenever the CPU utilization may change (from the
     158  scheduler's perspective). They are expected to carry out computations needed
     162  scheduler context or asynchronously, via a kernel thread or workqueue, depending
     183  callbacks are invoked by the CPU scheduler in the same way as for scaling
     185  use and change the hardware configuration accordingly in one go from scheduler
     384  This governor uses CPU utilization data available from the CPU scheduler. It
     385  generally is regarded as a part of the CPU scheduler, so it can access the
     386  scheduler's internal data structures directly.
     [all …]
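
Note: these hits surround the description of the schedutil governor, which derives a target frequency from the scheduler's utilization signal using, roughly, f = 1.25 * f_0 * util / max. A sketch of that arithmetic (the 25% headroom is taken as f + f/4; names and clamping are illustrative):

    #include <stdint.h>

    static uint64_t next_freq(uint64_t f0, uint64_t util, uint64_t max_util,
                              uint64_t max_freq)
    {
        uint64_t f = f0 * util / max_util;  /* proportional to utilization */

        f += f >> 2;                        /* 25% headroom: f *= 1.25 */
        return f > max_freq ? max_freq : f;
    }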
|
/Linux-v4.19/Documentation/timers/ |
D | 00-INDEX |
      10  - Summary of the different methods for the scheduler clock-interrupts management.
|