
Searched refs:scheduler (Results 1 – 25 of 164) sorted by relevance


/Linux-v5.15/net/netfilter/ipvs/
ip_vs_sched.c
41 struct ip_vs_scheduler *scheduler) in ip_vs_bind_scheduler() argument
45 if (scheduler->init_service) { in ip_vs_bind_scheduler()
46 ret = scheduler->init_service(svc); in ip_vs_bind_scheduler()
52 rcu_assign_pointer(svc->scheduler, scheduler); in ip_vs_bind_scheduler()
65 cur_sched = rcu_dereference_protected(svc->scheduler, 1); in ip_vs_unbind_scheduler()
133 void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler) in ip_vs_scheduler_put() argument
135 if (scheduler) in ip_vs_scheduler_put()
136 module_put(scheduler->module); in ip_vs_scheduler_put()
145 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler); in ip_vs_scheduler_err()
167 int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) in register_ip_vs_scheduler() argument
[all …]
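The ip_vs_sched.c hits above are the classic RCU publish/read pair: rcu_assign_pointer() publishes the newly bound scheduler and rcu_dereference() picks it up on the read side. Below is a minimal sketch of that pattern, rebuilt with userspace RCU (liburcu, an assumption; build with -lurcu) so it can run outside the kernel; my_sched and my_service are hypothetical stand-ins for ip_vs_scheduler and ip_vs_service:

/* Sketch of the publish/read pattern shown in ip_vs_bind_scheduler(),
 * using userspace RCU (liburcu) instead of the in-kernel RCU API. */
#include <urcu.h>   /* rcu_assign_pointer, rcu_dereference, rcu_read_lock */
#include <stdio.h>
#include <stdlib.h>

struct my_sched { const char *name; };             /* stand-in for ip_vs_scheduler */
struct my_service { struct my_sched *scheduler; }; /* stand-in for ip_vs_service */

static struct my_service svc;

static void bind_scheduler(struct my_sched *sched)
{
	/* Publish: readers are guaranteed to see a fully initialized object. */
	rcu_assign_pointer(svc.scheduler, sched);
}

static void report_scheduler(void)
{
	rcu_read_lock();
	struct my_sched *s = rcu_dereference(svc.scheduler);
	if (s)
		printf("bound scheduler: %s\n", s->name);
	rcu_read_unlock();
}

int main(void)
{
	struct my_sched *rr = malloc(sizeof(*rr));

	rr->name = "rr";
	rcu_register_thread();
	bind_scheduler(rr);
	report_scheduler();
	rcu_unregister_thread();
	free(rr);
	return 0;
}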
/Linux-v5.15/drivers/gpu/drm/i915/gvt/
sched_policy.c
134 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; in try_to_schedule_next_vgpu() local
143 if (scheduler->next_vgpu == scheduler->current_vgpu) { in try_to_schedule_next_vgpu()
144 scheduler->next_vgpu = NULL; in try_to_schedule_next_vgpu()
152 scheduler->need_reschedule = true; in try_to_schedule_next_vgpu()
156 if (scheduler->current_workload[engine->id]) in try_to_schedule_next_vgpu()
161 vgpu_update_timeslice(scheduler->current_vgpu, cur_time); in try_to_schedule_next_vgpu()
162 vgpu_data = scheduler->next_vgpu->sched_data; in try_to_schedule_next_vgpu()
166 scheduler->current_vgpu = scheduler->next_vgpu; in try_to_schedule_next_vgpu()
167 scheduler->next_vgpu = NULL; in try_to_schedule_next_vgpu()
169 scheduler->need_reschedule = false; in try_to_schedule_next_vgpu()
[all …]
scheduler.c
290 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; in shadow_context_status_change() local
296 spin_lock_irqsave(&scheduler->mmio_context_lock, flags); in shadow_context_status_change()
298 scheduler->engine_owner[ring_id]) { in shadow_context_status_change()
300 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], in shadow_context_status_change()
302 scheduler->engine_owner[ring_id] = NULL; in shadow_context_status_change()
304 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags); in shadow_context_status_change()
309 workload = scheduler->current_workload[ring_id]; in shadow_context_status_change()
315 spin_lock_irqsave(&scheduler->mmio_context_lock, flags); in shadow_context_status_change()
316 if (workload->vgpu != scheduler->engine_owner[ring_id]) { in shadow_context_status_change()
318 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], in shadow_context_status_change()
[all …]
debugfs.c
98 spin_lock_bh(&gvt->scheduler.mmio_context_lock); in vgpu_mmio_diff_show()
105 spin_unlock_bh(&gvt->scheduler.mmio_context_lock); in vgpu_mmio_diff_show()
/Linux-v5.15/Documentation/block/
switching-sched.rst
5 Each io queue has a set of io scheduler tunables associated with it. These
6 tunables control how the io scheduler works. You can find these entries
16 It is possible to change the IO scheduler for a given block device on
20 To set a specific scheduler, simply do this::
22 echo SCHEDNAME > /sys/block/DEV/queue/scheduler
24 where SCHEDNAME is the name of a defined IO scheduler, and DEV is the
28 a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
29 will be displayed, with the currently selected scheduler in brackets::
31 # cat /sys/block/sda/queue/scheduler
33 # echo none >/sys/block/sda/queue/scheduler
[all …]
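The snippet already shows the sysfs interface from the shell; as a hedged companion, the same switch done from C. The sda device and the mq-deadline name are assumptions, and the write needs root:

/* Minimal sketch: select an I/O scheduler by writing its name to
 * /sys/block/<dev>/queue/scheduler, as switching-sched.rst describes.
 * "sda" and "mq-deadline" are assumptions; requires root. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/scheduler";
	char line[256];
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("mq-deadline\n", f);
	fclose(f);

	/* Read it back: the active scheduler is shown in brackets. */
	f = fopen(path, "r");
	if (f && fgets(line, sizeof(line), f))
		printf("%s", line);
	if (f)
		fclose(f);
	return 0;
}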
deadline-iosched.rst
2 Deadline IO scheduler tunables
5 This little file attempts to document how the deadline io scheduler works.
12 selecting an io scheduler on a per-device basis.
19 The goal of the deadline io scheduler is to attempt to guarantee a start
21 tunable. When a read request first enters the io scheduler, it is assigned
49 When we have to move requests from the io scheduler queue to the block
60 Sometimes it happens that a request enters the io scheduler that is contiguous
69 rbtree front sector lookup when the io scheduler merge function is called.
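Per-device tunables such as read_expire, write_expire and fifo_batch live under queue/iosched/ while a scheduler that exports them is active. A small sketch that dumps whatever tunables the current scheduler exposes (the sda path is an assumption):

/* Sketch: enumerate the per-device scheduler tunables described in
 * deadline-iosched.rst. The directory exists only while a scheduler
 * with tunables is selected; "sda" is an assumption. */
#include <dirent.h>
#include <stdio.h>

int main(void)
{
	const char *dir = "/sys/block/sda/queue/iosched";
	DIR *d = opendir(dir);
	struct dirent *e;

	if (!d) {
		perror("opendir");
		return 1;
	}
	while ((e = readdir(d)) != NULL) {
		char path[512], buf[64];
		FILE *f;

		if (e->d_name[0] == '.')
			continue;
		snprintf(path, sizeof(path), "%s/%s", dir, e->d_name);
		f = fopen(path, "r");
		if (f && fgets(buf, sizeof(buf), f))
			printf("%-16s %s", e->d_name, buf);
		if (f)
			fclose(f);
	}
	closedir(d);
	return 0;
}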
kyber-iosched.rst
2 Kyber I/O scheduler tunables
5 The only two tunables for the Kyber scheduler are the target latencies for
null_blk.rst
89 Enable/disable the io scheduler.
92 0 nullb* use default blk-mq io scheduler
93 1 nullb* doesn't use io scheduler
request.rst
33 I I/O scheduler member
48 ``void *elevator_private`` I I/O scheduler private data
/Linux-v5.15/block/
Kconfig.iosched
7 tristate "MQ deadline I/O scheduler"
10 MQ version of the deadline IO scheduler.
13 tristate "Kyber I/O scheduler"
16 The Kyber I/O scheduler is a low-overhead scheduler suitable for
22 tristate "BFQ I/O scheduler"
24 BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
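For completeness, a hedged .config fragment selecting the three blk-mq schedulers quoted above; the symbol names match block/Kconfig.iosched in v5.15:

# Example .config fragment (illustrative): build mq-deadline and
# kyber into the kernel, BFQ as a module.
CONFIG_MQ_IOSCHED_DEADLINE=y
CONFIG_MQ_IOSCHED_KYBER=y
CONFIG_IOSCHED_BFQ=m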
/Linux-v5.15/Documentation/gpu/rfc/
i915_scheduler.rst
8 i915 with the DRM scheduler is:
14 * Lots of rework will need to be done to integrate with DRM scheduler so
32 * Convert the i915 to use the DRM scheduler
33 * GuC submission backend fully integrated with DRM scheduler
35 handled in DRM scheduler)
36 * Resets / cancels hook in DRM scheduler
37 * Watchdog hooks into DRM scheduler
39 integrated with DRM scheduler (e.g. state machine gets
41 * Execlists backend will do the minimum required to hook in the DRM scheduler
44 be difficult to integrate with the DRM scheduler and these
[all …]
/Linux-v5.15/Documentation/scheduler/
sched-design-CFS.rst
10 scheduler implemented by Ingo Molnar and merged in Linux 2.6.23. It is the
11 replacement for the previous vanilla scheduler's SCHED_OTHER interactivity
59 previous vanilla scheduler and RSDL/SD are affected).
79 schedules (or a scheduler tick happens) the task's CPU usage is "accounted
93 other HZ detail. Thus the CFS scheduler has no notion of "timeslices" in the
94 way the previous scheduler had, and has no heuristics whatsoever. There is
99 which can be used to tune the scheduler from "desktop" (i.e., low latencies) to
101 for desktop workloads. SCHED_BATCH is handled by the CFS scheduler module too.
103 Due to its design, the CFS scheduler is not prone to any of the "attacks" that
104 exist today against the heuristics of the stock scheduler: fiftyp.c, thud.c,
[all …]
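The text notes that SCHED_BATCH is handled by the CFS module too. A minimal sketch moving the calling process to SCHED_BATCH via the standard sched_setscheduler() call (SCHED_BATCH needs _GNU_SOURCE with glibc; the priority must be 0 for non-realtime policies):

/* Sketch: put the current process under SCHED_BATCH, the CFS-handled
 * policy mentioned in sched-design-CFS.rst. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 0 };

	if (sched_setscheduler(0, SCHED_BATCH, &sp) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("policy now: %d (SCHED_BATCH=%d)\n",
	       sched_getscheduler(0), SCHED_BATCH);
	return 0;
}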
sched-nice-design.rst
6 nice-levels implementation in the new Linux scheduler.
12 scheduler, (otherwise we'd have done it long ago) because nice level
16 In the O(1) scheduler (in 2003) we changed negative nice levels to be
77 With the old scheduler, if you for example started a niced task with +1
88 The new scheduler in v2.6.23 addresses all three types of complaints:
91 enough), the scheduler was decoupled from 'time slice' and HZ concepts
94 support: with the new scheduler nice +19 tasks get a HZ-independent
96 scheduler.
99 the new scheduler makes nice(1) have the same CPU utilization effect on
101 scheduler, running a nice +10 and a nice +11 task has the same CPU
[all …]
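The userspace side of this design is the ordinary nice/setpriority API. A short sketch that re-nices the calling process to +10 and reads the value back (note getpriority() can legitimately return -1, hence the errno check):

/* Sketch: the nice API whose semantics sched-nice-design.rst discusses.
 * Raising one's own nice value never needs privileges; lowering it does. */
#include <sys/resource.h>
#include <stdio.h>
#include <errno.h>

int main(void)
{
	int n;

	if (setpriority(PRIO_PROCESS, 0, 10) == -1) {
		perror("setpriority");
		return 1;
	}
	errno = 0;
	n = getpriority(PRIO_PROCESS, 0);
	if (n == -1 && errno)
		perror("getpriority");
	else
		printf("nice level: %+d\n", n);
	return 0;
}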
sched-energy.rst
8 Energy Aware Scheduling (or EAS) gives the scheduler the ability to predict
23 The actual EM used by EAS is _not_ maintained by the scheduler, but by a
50 scheduler. This alternative considers two objectives: energy-efficiency and
53 The idea behind introducing an EM is to allow the scheduler to evaluate the
56 time, the EM must be as simple as possible to minimize the scheduler latency
60 for the scheduler to decide where a task should run (during wake-up), the EM
71 EAS (as well as the rest of the scheduler) uses the notion of 'capacity' to
87 The scheduler manages references to the EM objects in the topology code when the
89 scheduler maintains a singly linked list of all performance domains intersecting
115 Please note that the scheduler will create two duplicate list nodes for
[all …]
schedutil.txt
10 With PELT we track some metrics across the various scheduler entities, from
86 - Documentation/scheduler/sched-capacity.rst:"1. CPU Capacity + 2. Task utilization"
120 Every time the scheduler load tracking is updated (task wakeup, task
147 Because these callbacks are directly from the scheduler, the DVFS hardware
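schedutil builds on PELT, whose geometric series uses a decay factor y chosen so that y^32 = 0.5, i.e. a contribution halves after 32 periods of roughly 1ms. A hedged arithmetic sketch of that decay (illustration only, not kernel code; link with -lm):

/* Sketch: PELT-style geometric decay with y^32 == 0.5, so a fully
 * busy 1ms contribution of 1024 halves every 32ms. */
#include <math.h>
#include <stdio.h>

int main(void)
{
	double y = pow(0.5, 1.0 / 32.0);  /* decay factor: y^32 == 0.5 */
	double contrib = 1024.0;          /* one fully busy period */
	int ms;

	for (ms = 0; ms <= 128; ms += 32)
		printf("after %3d ms: %7.2f\n", ms, contrib * pow(y, ms));
	return 0;
}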
/Linux-v5.15/sound/pci/mixart/
mixart_core.h
217 u64 scheduler; member
230 u64 scheduler; member
239 u64 scheduler; member
380 u64 scheduler; member
431 u64 scheduler; member
491 u64 scheduler; member
536 u64 scheduler; member
/Linux-v5.15/tools/testing/kunit/test_data/
test_is_test_passed-no_tests_run_no_header.log
33 io scheduler noop registered
34 io scheduler deadline registered
35 io scheduler cfq registered (default)
36 io scheduler mq-deadline registered
37 io scheduler kyber registered
/Linux-v5.15/Documentation/virt/kvm/
halt-polling.rst
12 before giving up the cpu to the scheduler in order to let something else run.
15 very quickly by at least saving us a trip through the scheduler, normally on
18 interval or some other task on the runqueue is runnable the scheduler is
21 savings of not invoking the scheduler are distinguishable.
34 The maximum time for which to poll before invoking the scheduler, referred to
77 whether the scheduler is invoked within that function).
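The polling interval discussed here is exposed as the kvm module parameter halt_poll_ns. A sketch that reads it back; the /sys/module/kvm path is the usual x86 location and is an assumption:

/* Sketch: inspect the halt_poll_ns knob halt-polling.rst describes. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/module/kvm/parameters/halt_poll_ns", "r");
	char buf[32];

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("halt_poll_ns = %s", buf);
	fclose(f);
	return 0;
}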
/Linux-v5.15/drivers/gpu/drm/i915/
i915_getparam.c
65 value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES); in i915_getparam_ioctl()
111 value = i915->caps.scheduler; in i915_getparam_ioctl()
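The userspace side of this query is the i915 GETPARAM ioctl. A hedged sketch probing I915_PARAM_HAS_SCHEDULER; /dev/dri/card0 is an assumption, and the header ships as drm/i915_drm.h with kernel uapi headers (libdrm installs it under libdrm/ instead):

/* Sketch: ask the i915 driver for its scheduler capability bits,
 * the value filled in by i915_getparam_ioctl() above. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>  /* header path may vary by distro */

int main(void)
{
	int value = 0;
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_SCHEDULER,
		.value = &value,
	};

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("scheduler caps: 0x%x\n", value);
	else
		perror("DRM_IOCTL_I915_GETPARAM");
	close(fd);
	return 0;
}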
TODO.txt
8 - Come up with a plan what to do with drm/scheduler and how to get there.
26 should be moved to dma_fence, drm/scheduler or atomic commit helpers. Or
/Linux-v5.15/Documentation/devicetree/bindings/usb/
da8xx-usb.txt
35 - reg-names: "controller", "scheduler", "queuemgr"
74 reg-names = "controller", "scheduler", "queuemgr";
/Linux-v5.15/drivers/gpu/drm/i915/gt/
intel_engine_user.c
130 i915->caps.scheduler = enabled & ~disabled; in set_scheduler_caps()
131 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED)) in set_scheduler_caps()
132 i915->caps.scheduler = 0; in set_scheduler_caps()
/Linux-v5.15/net/sched/
Kconfig
16 If you say N here, you will get the standard packet scheduler, which
58 CBQ is a commonly used scheduler, so if you're unsure, you should
92 Say Y here if you want to use the ATM pseudo-scheduler. This
106 scheduler.
114 Say Y here if you want to use an n-band queue packet scheduler
244 tristate "Deficit Round Robin scheduler (DRR)"
255 tristate "Multi-queue priority scheduler (MQPRIO)"
257 Say Y here if you want to use the Multi-queue Priority scheduler.
258 This scheduler allows QOS to be offloaded on NICs that have support
267 tristate "SKB priority queue scheduler (SKBPRIO)"
[all …]
/Linux-v5.15/Documentation/admin-guide/cgroup-v1/
cpusets.rst
60 CPUs or Memory Nodes not in that cpuset. The scheduler will not
106 kernel to avoid any additional impact on the critical scheduler or
294 the system load imposed by a batch scheduler monitoring this
299 counter, a batch scheduler can detect memory pressure with a
304 the batch scheduler can obtain the key information, memory
392 The kernel scheduler (kernel/sched/core.c) automatically load balances
400 linearly with the number of CPUs being balanced. So the scheduler
433 scheduler will avoid load balancing across the CPUs in that cpuset,
438 enabled, then the scheduler will have one sched domain covering all
451 scheduler might not consider the possibility of load balancing that
[all …]
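The flag discussed above is the per-cpuset file cpuset.sched_load_balance. A hedged sketch that creates a cgroup-v1 cpuset and turns balancing off inside it; the mount point, CPU list, and group name are assumptions, and it needs root:

/* Sketch: disable scheduler load balancing inside one cpuset, per the
 * cpusets.rst description. Paths assume a cgroup-v1 cpuset mount. */
#include <stdio.h>
#include <sys/stat.h>

static int write_str(const char *dir, const char *file, const char *s)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, file);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs(s, f);
	return fclose(f);
}

int main(void)
{
	const char *g = "/sys/fs/cgroup/cpuset/isolated";

	mkdir(g, 0755);  /* may already exist; failures surface below */
	if (write_str(g, "cpuset.cpus", "2-3") ||
	    write_str(g, "cpuset.mems", "0") ||
	    write_str(g, "cpuset.sched_load_balance", "0"))
		return 1;
	printf("%s: CPUs 2-3, load balancing off\n", g);
	return 0;
}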
/Linux-v5.15/Documentation/ABI/testing/
sysfs-cfq-target-latency
6 when the user sets cfq to /sys/block/<device>/scheduler.
