| /Linux-v5.4/net/netfilter/ipvs/ |
| D | ip_vs_sched.c |
|   41   struct ip_vs_scheduler *scheduler)  in ip_vs_bind_scheduler() argument
|   45   if (scheduler->init_service) {  in ip_vs_bind_scheduler()
|   46   ret = scheduler->init_service(svc);  in ip_vs_bind_scheduler()
|   52   rcu_assign_pointer(svc->scheduler, scheduler);  in ip_vs_bind_scheduler()
|   65   cur_sched = rcu_dereference_protected(svc->scheduler, 1);  in ip_vs_unbind_scheduler()
|   133  void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)  in ip_vs_scheduler_put() argument
|   135  if (scheduler)  in ip_vs_scheduler_put()
|   136  module_put(scheduler->module);  in ip_vs_scheduler_put()
|   145  struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);  in ip_vs_scheduler_err()
|   167  int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)  in register_ip_vs_scheduler() argument
|   [all …]
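The ip_vs_bind_scheduler() lines above show IPVS fully initializing a scheduler (init_service) before publishing it to readers with rcu_assign_pointer(). Below is a minimal userspace sketch of that publish-after-init idea, using C11 atomics rather than kernel RCU; the struct name, fields, and values are illustrative, not the real IPVS types.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a scheduler object; not the real ip_vs_scheduler. */
struct sched {
    const char *name;
    int weight;
};

/* Readers load this pointer; the writer publishes it with a release store. */
static _Atomic(struct sched *) active_sched;

static void *reader(void *arg)
{
    /* The acquire load pairs with the writer's release store, so if we
     * observe a non-NULL pointer we also observe its initialized fields. */
    struct sched *s = atomic_load_explicit(&active_sched, memory_order_acquire);
    if (s)
        printf("reader sees scheduler %s (weight %d)\n", s->name, s->weight);
    else
        printf("reader sees no scheduler bound yet\n");
    return NULL;
}

int main(void)
{
    pthread_t t;
    struct sched *s = malloc(sizeof(*s));

    /* "init_service" step: fill in every field first... */
    s->name = "wrr";
    s->weight = 10;
    /* ...then publish, analogous to rcu_assign_pointer(). */
    atomic_store_explicit(&active_sched, s, memory_order_release);

    pthread_create(&t, NULL, reader, NULL);
    pthread_join(t, NULL);
    free(s);
    return 0;
}
```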
|
| /Linux-v5.4/drivers/gpu/drm/i915/gvt/ |
| D | sched_policy.c |
|   134  struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;  in try_to_schedule_next_vgpu() local
|   143  if (scheduler->next_vgpu == scheduler->current_vgpu) {  in try_to_schedule_next_vgpu()
|   144  scheduler->next_vgpu = NULL;  in try_to_schedule_next_vgpu()
|   152  scheduler->need_reschedule = true;  in try_to_schedule_next_vgpu()
|   156  if (scheduler->current_workload[i])  in try_to_schedule_next_vgpu()
|   161  vgpu_update_timeslice(scheduler->current_vgpu, cur_time);  in try_to_schedule_next_vgpu()
|   162  vgpu_data = scheduler->next_vgpu->sched_data;  in try_to_schedule_next_vgpu()
|   166  scheduler->current_vgpu = scheduler->next_vgpu;  in try_to_schedule_next_vgpu()
|   167  scheduler->next_vgpu = NULL;  in try_to_schedule_next_vgpu()
|   169  scheduler->need_reschedule = false;  in try_to_schedule_next_vgpu()
|   [all …]
|
| D | scheduler.c |
|   232  struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;  in shadow_context_status_change() local
|   238  spin_lock_irqsave(&scheduler->mmio_context_lock, flags);  in shadow_context_status_change()
|   240  scheduler->engine_owner[ring_id]) {  in shadow_context_status_change()
|   242  intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],  in shadow_context_status_change()
|   244  scheduler->engine_owner[ring_id] = NULL;  in shadow_context_status_change()
|   246  spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);  in shadow_context_status_change()
|   251  workload = scheduler->current_workload[ring_id];  in shadow_context_status_change()
|   257  spin_lock_irqsave(&scheduler->mmio_context_lock, flags);  in shadow_context_status_change()
|   258  if (workload->vgpu != scheduler->engine_owner[ring_id]) {  in shadow_context_status_change()
|   260  intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],  in shadow_context_status_change()
|   [all …]
|
| D | debugfs.c |
|   99   spin_lock_bh(&gvt->scheduler.mmio_context_lock);  in vgpu_mmio_diff_show()
|   106  spin_unlock_bh(&gvt->scheduler.mmio_context_lock);  in vgpu_mmio_diff_show()
|
| D | Makefile | 5 execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
|
| /Linux-v5.4/Documentation/block/ |
| D | switching-sched.rst |
|   5   Each io queue has a set of io scheduler tunables associated with it. These
|   6   tunables control how the io scheduler works. You can find these entries
|   16  It is possible to change the IO scheduler for a given block device on
|   20  To set a specific scheduler, simply do this::
|   22  echo SCHEDNAME > /sys/block/DEV/queue/scheduler
|   24  where SCHEDNAME is the name of a defined IO scheduler, and DEV is the
|   28  a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
|   29  will be displayed, with the currently selected scheduler in brackets::
|   31  # cat /sys/block/sda/queue/scheduler
|   33  # echo none >/sys/block/sda/queue/scheduler
|   [all …]
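The switching-sched.rst excerpt documents the sysfs interface for changing a block device's I/O scheduler (echo a name into /sys/block/DEV/queue/scheduler). A small C sketch doing the same read-then-write from a program; "sda" and "mq-deadline" are example choices, and the write requires root:

```c
#include <stdio.h>

int main(void)
{
    const char *path = "/sys/block/sda/queue/scheduler";  /* example device */
    char line[256];
    FILE *f;

    /* Show the available schedulers; the active one is shown in brackets. */
    f = fopen(path, "r");
    if (!f) { perror("open scheduler attribute"); return 1; }
    if (fgets(line, sizeof(line), f))
        printf("current: %s", line);
    fclose(f);

    /* Switch to mq-deadline (needs root / CAP_SYS_ADMIN). */
    f = fopen(path, "w");
    if (!f) { perror("open scheduler attribute for write"); return 1; }
    if (fputs("mq-deadline\n", f) == EOF)
        perror("write");
    fclose(f);
    return 0;
}
```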
|
| D | deadline-iosched.rst |
|   2   Deadline IO scheduler tunables
|   5   This little file attempts to document how the deadline io scheduler works.
|   12  selecting an io scheduler on a per-device basis.
|   19  The goal of the deadline io scheduler is to attempt to guarantee a start
|   21  tunable. When a read request first enters the io scheduler, it is assigned
|   49  When we have to move requests from the io scheduler queue to the block
|   60  Sometimes it happens that a request enters the io scheduler that is contiguous
|   69  rbtree front sector lookup when the io scheduler merge function is called.
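The deadline tunables referred to above (read_expire and friends) appear per device under /sys/block/DEV/queue/iosched/ once a deadline scheduler is active; on a blk-mq kernel such as v5.4 that means mq-deadline. A sketch that dumps a few of them, assuming mq-deadline is selected for the example disk "sda":

```c
#include <stdio.h>

int main(void)
{
    /* Tunables documented in deadline-iosched.rst; the *_expire values
     * are deadlines in milliseconds. */
    const char *tunables[] = { "read_expire", "write_expire",
                               "fifo_batch", "writes_starved", "front_merges" };
    char path[128], buf[64];

    for (unsigned i = 0; i < sizeof(tunables) / sizeof(tunables[0]); i++) {
        snprintf(path, sizeof(path),
                 "/sys/block/sda/queue/iosched/%s", tunables[i]);  /* example disk */
        FILE *f = fopen(path, "r");
        if (!f)
            continue;  /* scheduler not active or tunable absent */
        if (fgets(buf, sizeof(buf), f))
            printf("%-15s %s", tunables[i], buf);
        fclose(f);
    }
    return 0;
}
```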
|
| D | kyber-iosched.rst |
|   2  Kyber I/O scheduler tunables
|   5  The only two tunables for the Kyber scheduler are the target latencies for
|
| D | null_blk.rst |
|   89  Enable/disable the io scheduler.
|   92  0 nullb* use default blk-mq io scheduler
|   93  1 nullb* doesn't use io scheduler
|
| D | request.rst |
|   33  I I/O scheduler member
|   48  ``void *elevator_private`` I I/O scheduler private data
|
| D | queue-sysfs.rst |
|   195  scheduler (RW)
|   198  for this block device. The currently active IO scheduler will be enclosed
|   199  in [] brackets. Writing an IO scheduler name to this file will switch
|   200  control of this block device to that new IO scheduler. Note that writing
|   201  an IO scheduler name to this file will attempt to load that IO scheduler
|
| D | biodoc.rst |
|   60   - I/O scheduler modularization
|   80   4. The I/O scheduler
|   127  Various parameters that the generic i/o scheduler logic uses are set at
|   234  iii. The i/o scheduler algorithm itself can be replaced/set as appropriate
|   236  As in 2.4, it is possible to plugin a brand new i/o scheduler for a particular
|   239  of the i/o scheduler. There are more pluggable callbacks, e.g for init,
|   243  the i/o scheduler from block drivers.
|   245  I/O scheduler wrappers are to be used instead of accessing the queue directly.
|   246  See section 4. The I/O scheduler for details.
|   395  on to the generic block layer, only to be merged by the i/o scheduler
|   [all …]
|
| /Linux-v5.4/block/ |
| D | Kconfig.iosched |
|   7   tristate "MQ deadline I/O scheduler"
|   10  MQ version of the deadline IO scheduler.
|   13  tristate "Kyber I/O scheduler"
|   16  The Kyber I/O scheduler is a low-overhead scheduler suitable for
|   22  tristate "BFQ I/O scheduler"
|   24  BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
|
| /Linux-v5.4/Documentation/scheduler/ |
| D | sched-design-CFS.rst |
|   10   scheduler implemented by Ingo Molnar and merged in Linux 2.6.23. It is the
|   11   replacement for the previous vanilla scheduler's SCHED_OTHER interactivity
|   59   previous vanilla scheduler and RSDL/SD are affected).
|   79   schedules (or a scheduler tick happens) the task's CPU usage is "accounted
|   93   other HZ detail. Thus the CFS scheduler has no notion of "timeslices" in the
|   94   way the previous scheduler had, and has no heuristics whatsoever. There is
|   99   which can be used to tune the scheduler from "desktop" (i.e., low latencies) to
|   101  for desktop workloads. SCHED_BATCH is handled by the CFS scheduler module too.
|   103  Due to its design, the CFS scheduler is not prone to any of the "attacks" that
|   104  exist today against the heuristics of the stock scheduler: fiftyp.c, thud.c,
|   [all …]
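sched-design-CFS.rst notes that SCHED_BATCH is handled by the CFS module as well. For reference, a short sketch of how a program places itself into SCHED_BATCH using the standard sched_setscheduler() call (not something specific to this document; SCHED_BATCH needs _GNU_SOURCE with glibc):

```c
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
    /* SCHED_BATCH is a CFS policy for CPU-bound, non-interactive work;
     * sched_priority must be 0 for the non-realtime policies. */
    struct sched_param sp = { .sched_priority = 0 };

    if (sched_setscheduler(0, SCHED_BATCH, &sp) == -1) {
        perror("sched_setscheduler");
        return 1;
    }
    printf("policy is now %d (SCHED_BATCH = %d)\n",
           sched_getscheduler(0), SCHED_BATCH);
    return 0;
}
```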
|
| D | sched-nice-design.rst |
|   6    nice-levels implementation in the new Linux scheduler.
|   12   scheduler, (otherwise we'd have done it long ago) because nice level
|   16   In the O(1) scheduler (in 2003) we changed negative nice levels to be
|   77   With the old scheduler, if you for example started a niced task with +1
|   88   The new scheduler in v2.6.23 addresses all three types of complaints:
|   91   enough), the scheduler was decoupled from 'time slice' and HZ concepts
|   94   support: with the new scheduler nice +19 tasks get a HZ-independent
|   96   scheduler.
|   99   the new scheduler makes nice(1) have the same CPU utilization effect on
|   101  scheduler, running a nice +10 and a nice 11 task has the same CPU
|   [all …]
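sched-nice-design.rst discusses how CFS gave nice levels a consistent relative effect. Adjusting a process's nice value from C is done with the standard getpriority()/setpriority() calls, shown here on the calling process:

```c
#include <sys/resource.h>
#include <stdio.h>
#include <errno.h>

int main(void)
{
    /* Raising the nice value (lowering priority) is always allowed;
     * lowering it below the current value needs CAP_SYS_NICE. */
    errno = 0;
    int before = getpriority(PRIO_PROCESS, 0);
    if (before == -1 && errno)
        perror("getpriority");

    if (setpriority(PRIO_PROCESS, 0, before + 5) == -1)
        perror("setpriority");

    printf("nice: %d -> %d\n", before, getpriority(PRIO_PROCESS, 0));
    return 0;
}
```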
|
| D | sched-energy.rst |
|   8    Energy Aware Scheduling (or EAS) gives the scheduler the ability to predict
|   23   The actual EM used by EAS is _not_ maintained by the scheduler, but by a
|   50   scheduler. This alternative considers two objectives: energy-efficiency and
|   53   The idea behind introducing an EM is to allow the scheduler to evaluate the
|   56   time, the EM must be as simple as possible to minimize the scheduler latency
|   60   for the scheduler to decide where a task should run (during wake-up), the EM
|   71   EAS (as well as the rest of the scheduler) uses the notion of 'capacity' to
|   87   The scheduler manages references to the EM objects in the topology code when the
|   89   scheduler maintains a singly linked list of all performance domains intersecting
|   115  Please note that the scheduler will create two duplicate list nodes for
|   [all …]
|
| /Linux-v5.4/sound/pci/mixart/ |
| D | mixart_core.h |
|   217  u64 scheduler;  member
|   230  u64 scheduler;  member
|   239  u64 scheduler;  member
|   380  u64 scheduler;  member
|   431  u64 scheduler;  member
|   491  u64 scheduler;  member
|   536  u64 scheduler;  member
|
| /Linux-v5.4/Documentation/virt/kvm/ |
| D | halt-polling.txt |
|   9   before giving up the cpu to the scheduler in order to let something else run.
|   12  very quickly by at least saving us a trip through the scheduler, normally on
|   15  interval or some other task on the runqueue is runnable the scheduler is
|   18  savings of not invoking the scheduler are distinguishable.
|   31  The maximum time for which to poll before invoking the scheduler, referred to
|   73  whether the scheduler is invoked within that function).
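halt-polling.txt describes the bound on how long KVM polls before it finally invokes the scheduler, controlled by the halt_poll_ns module parameter. On a running system that parameter is visible through sysfs; a sketch that just reads it, assuming the kvm module is loaded and the usual module-parameter path:

```c
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/sys/module/kvm/parameters/halt_poll_ns", "r");
    char buf[32];

    if (!f) {
        perror("kvm halt_poll_ns");  /* kvm module not loaded? */
        return 1;
    }
    if (fgets(buf, sizeof(buf), f))
        printf("halt_poll_ns = %s", buf);  /* ns to poll before scheduling away */
    fclose(f);
    return 0;
}
```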
|
| /Linux-v5.4/drivers/gpu/drm/i915/ |
| D | i915_getparam.c |
|   62   value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);  in i915_getparam_ioctl()
|   108  value = i915->caps.scheduler;  in i915_getparam_ioctl()
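i915_getparam.c hands the scheduler capability bits (i915->caps.scheduler) back to userspace through the GETPARAM ioctl. A hedged sketch of querying them with I915_PARAM_HAS_SCHEDULER and testing the I915_SCHEDULER_CAP_* bits that appear in the excerpts; the header path and the /dev/dri/card0 node are example assumptions and may differ between kernel-uapi and libdrm installs:

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>   /* header location may vary (libdrm installs i915_drm.h too) */

int main(void)
{
    int fd = open("/dev/dri/card0", O_RDWR);  /* example DRM node */
    int caps = 0;
    struct drm_i915_getparam gp = {
        .param = I915_PARAM_HAS_SCHEDULER,
        .value = &caps,
    };

    if (fd < 0) { perror("open /dev/dri/card0"); return 1; }
    if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0) {
        printf("scheduler caps: 0x%x\n", caps);
        printf("  enabled:    %d\n", !!(caps & I915_SCHEDULER_CAP_ENABLED));
        printf("  semaphores: %d\n", !!(caps & I915_SCHEDULER_CAP_SEMAPHORES));
    } else {
        perror("DRM_IOCTL_I915_GETPARAM");
    }
    return 0;
}
```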
|
| /Linux-v5.4/Documentation/devicetree/bindings/usb/ |
| D | da8xx-usb.txt |
|   35  - reg-names: "controller", "scheduler", "queuemgr"
|   74  reg-names = "controller", "scheduler", "queuemgr";
|
| /Linux-v5.4/drivers/gpu/drm/i915/gt/ |
| D | intel_engine_user.c |
|   125  i915->caps.scheduler = enabled & ~disabled;  in set_scheduler_caps()
|   126  if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))  in set_scheduler_caps()
|   127  i915->caps.scheduler = 0;  in set_scheduler_caps()
|
| /Linux-v5.4/Documentation/admin-guide/cgroup-v1/ |
| D | cpusets.rst |
|   58   CPUs or Memory Nodes not in that cpuset. The scheduler will not
|   104  kernel to avoid any additional impact on the critical scheduler or
|   281  the system load imposed by a batch scheduler monitoring this
|   286  counter, a batch scheduler can detect memory pressure with a
|   291  the batch scheduler can obtain the key information, memory
|   379  The kernel scheduler (kernel/sched/core.c) automatically load balances
|   387  linearly with the number of CPUs being balanced. So the scheduler
|   420  scheduler will avoid load balancing across the CPUs in that cpuset,
|   425  enabled, then the scheduler will have one sched domain covering all
|   438  scheduler might not consider the possibility of load balancing that
|   [all …]
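cpusets.rst explains that clearing a cpuset's sched_load_balance flag keeps the scheduler from building a sched domain across that cpuset's CPUs. Driving this is ordinary filesystem I/O against the cgroup-v1 cpuset hierarchy; a sketch, assuming the v1 cpuset controller is mounted at /sys/fs/cgroup/cpuset with the usual "cpuset." file prefix, and run as root (the cpuset name and CPU/memory values are examples):

```c
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>

/* Write a short string into a cpuset control file; returns 0 on success. */
static int put(const char *path, const char *val)
{
    FILE *f = fopen(path, "w");
    if (!f) { perror(path); return -1; }
    fputs(val, f);
    return fclose(f);
}

int main(void)
{
    const char *cs = "/sys/fs/cgroup/cpuset/batch";   /* example cpuset */

    if (mkdir(cs, 0755) && errno != EEXIST) { perror("mkdir"); return 1; }

    /* A cpuset needs cpus and mems assigned before tasks can join it. */
    put("/sys/fs/cgroup/cpuset/batch/cpuset.cpus", "2-3");
    put("/sys/fs/cgroup/cpuset/batch/cpuset.mems", "0");

    /* Ask the scheduler not to load balance across this cpuset's CPUs. */
    put("/sys/fs/cgroup/cpuset/batch/cpuset.sched_load_balance", "0");
    return 0;
}
```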
|
| /Linux-v5.4/Documentation/ABI/testing/ |
| D | sysfs-cfq-target-latency | 6 when the user sets cfq to /sys/block/<device>/scheduler.
|
| /Linux-v5.4/net/sched/ |
| D | Kconfig |
|   16   If you say N here, you will get the standard packet scheduler, which
|   58   CBQ is a commonly used scheduler, so if you're unsure, you should
|   92   Say Y here if you want to use the ATM pseudo-scheduler. This
|   106  scheduler.
|   114  Say Y here if you want to use an n-band queue packet scheduler
|   244  tristate "Deficit Round Robin scheduler (DRR)"
|   255  tristate "Multi-queue priority scheduler (MQPRIO)"
|   257  Say Y here if you want to use the Multi-queue Priority scheduler.
|   258  This scheduler allows QOS to be offloaded on NICs that have support
|   267  tristate "SKB priority queue scheduler (SKBPRIO)"
|   [all …]
|
| /Linux-v5.4/Documentation/admin-guide/pm/ |
| D | cpufreq.rst |
|   158  all of the online CPUs belonging to the given policy with the CPU scheduler.
|   159  The utilization update callbacks will be invoked by the CPU scheduler on
|   161  scheduler tick or generally whenever the CPU utilization may change (from the
|   162  scheduler's perspective). They are expected to carry out computations needed
|   166  scheduler context or asynchronously, via a kernel thread or workqueue, depending
|   187  callbacks are invoked by the CPU scheduler in the same way as for scaling
|   189  use and change the hardware configuration accordingly in one go from scheduler
|   388  This governor uses CPU utilization data available from the CPU scheduler. It
|   389  generally is regarded as a part of the CPU scheduler, so it can access the
|   390  scheduler's internal data structures directly.
|   [all …]
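The cpufreq.rst excerpt describes the schedutil governor, which is driven directly by the scheduler's utilization updates. Which governor a policy uses is still selected through sysfs; a sketch that prints the available governors and switches policy0 to schedutil (policy0 is an example, the write needs root, and schedutil must be built into the kernel):

```c
#include <stdio.h>

static void show(const char *path)
{
    char buf[128];
    FILE *f = fopen(path, "r");
    if (!f) { perror(path); return; }
    if (fgets(buf, sizeof(buf), f))
        printf("%s: %s", path, buf);
    fclose(f);
}

int main(void)
{
    const char *dir = "/sys/devices/system/cpu/cpufreq/policy0";  /* example policy */
    char path[160];

    snprintf(path, sizeof(path), "%s/scaling_available_governors", dir);
    show(path);
    snprintf(path, sizeof(path), "%s/scaling_governor", dir);
    show(path);

    /* Switch this policy to the scheduler-driven governor. */
    FILE *f = fopen(path, "w");
    if (f) {
        fputs("schedutil\n", f);
        fclose(f);
    } else {
        perror("scaling_governor (write)");
    }
    show(path);
    return 0;
}
```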
|