/Linux-v4.19/drivers/gpu/drm/i915/gvt/

D | scheduler.c |
    54  static void update_shadow_pdps(struct intel_vgpu_workload *workload) in update_shadow_pdps() argument
    57  workload->req->hw_context->state->obj; in update_shadow_pdps()
    61  if (WARN_ON(!workload->shadow_mm)) in update_shadow_pdps()
    64  if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount))) in update_shadow_pdps()
    70  (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps); in update_shadow_pdps()
    79  static void sr_oa_regs(struct intel_vgpu_workload *workload, in sr_oa_regs() argument
    82  struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; in sr_oa_regs()
    96  if (workload->ring_id != RCS) in sr_oa_regs()
   100  workload->oactxctrl = reg_state[ctx_oactxctrl + 1]; in sr_oa_regs()
   102  for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { in sr_oa_regs()
  [all …]

D | execlist.c |
   378  static int prepare_execlist_workload(struct intel_vgpu_workload *workload) in prepare_execlist_workload() argument
   380  struct intel_vgpu *vgpu = workload->vgpu; in prepare_execlist_workload()
   383  int ring_id = workload->ring_id; in prepare_execlist_workload()
   386  if (!workload->emulate_schedule_in) in prepare_execlist_workload()
   389  ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0); in prepare_execlist_workload()
   390  ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1); in prepare_execlist_workload()
   400  static int complete_execlist_workload(struct intel_vgpu_workload *workload) in complete_execlist_workload() argument
   402  struct intel_vgpu *vgpu = workload->vgpu; in complete_execlist_workload()
   403  int ring_id = workload->ring_id; in complete_execlist_workload()
   411  gvt_dbg_el("complete workload %p status %d\n", workload, in complete_execlist_workload()
  [all …]

D | cmd_parser.c |
   479  struct intel_vgpu_workload *workload; member
   908  s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4); in cmd_reg_handler()
  1115  s->workload->pending_events); in cmd_handler_pipe_control()
  1122  s->workload->pending_events); in cmd_handler_mi_user_interrupt()
  1589  s->workload->pending_events); in cmd_handler_mi_flush_dw()
  1652  s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; in find_bb_size()
  1667  "ppgtt" : "ggtt", s->ring_id, s->workload); in find_bb_size()
  1679  "ppgtt" : "ggtt", s->ring_id, s->workload); in find_bb_size()
  1706  s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; in perform_bb_shadow()
  1770  list_add(&bb->list, &s->workload->shadow_bb); in perform_bb_shadow()
  [all …]

D | trace.h |
   231  void *workload, char *cmd_name),
   234  buf_addr_type, workload, cmd_name),
   243  __field(void*, workload)
   255  __entry->workload = workload;
   271  __entry->workload)

D | scheduler.h |
   133  void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
   159  void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);

D | cmd_parser.h |
    45  int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);

/Linux-v4.19/tools/perf/tests/

D | perf-record.c |
   110  err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); in test__PERF_RECORD()
   122  if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) { in test__PERF_RECORD()
   208  if ((pid_t)sample.pid != evlist->workload.pid) { in test__PERF_RECORD()
   210  name, evlist->workload.pid, sample.pid); in test__PERF_RECORD()
   214  if ((pid_t)sample.tid != evlist->workload.pid) { in test__PERF_RECORD()
   216  name, evlist->workload.pid, sample.tid); in test__PERF_RECORD()
   225  (pid_t)event->comm.pid != evlist->workload.pid) { in test__PERF_RECORD()
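The hits above come from a perf self-test that forks a workload child and pins both the test and the workload to one CPU before verifying that every sample carries the workload's pid. What follows is a minimal, hypothetical sketch of that pinning step using sched_setaffinity(2); the pin_to_cpu() helper, the busy-loop workload, and the choice of CPU 0 are illustrative, not perf's code.

#define _GNU_SOURCE			/* for cpu_set_t, CPU_ZERO/CPU_SET */
#include <sched.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Hypothetical helper: restrict @pid (0 == the caller) to a single CPU. */
static int pin_to_cpu(pid_t pid, int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	return sched_setaffinity(pid, sizeof(mask), &mask);
}

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		/* the "workload": burn a little CPU, then exit */
		for (volatile long i = 0; i < 50000000L; i++)
			;
		_exit(0);
	}

	/* assume CPU 0 is online; pin the monitor and the workload to it */
	if (pin_to_cpu(0, 0) || pin_to_cpu(child, 0))
		perror("sched_setaffinity");

	waitpid(child, NULL, 0);
	return 0;
}

Confining both processes to the same CPU is what allows a test like the one above to expect every recorded event on that CPU with the workload's pid and tid.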
D | event-times.c |
    53  waitpid(evlist->workload.pid, NULL, 0); in detach__enable_on_exec()

/Linux-v4.19/tools/perf/Documentation/

D | perf-sched.txt |
    18  of an arbitrary workload.
    21  and other scheduling properties of the workload.
    23  'perf sched script' to see a detailed trace of the workload that
    26  'perf sched replay' to simulate the workload that was recorded
    28  that mimic the workload based on the events in the trace. These
    30  of the workload as it occurred when it was recorded - and can repeat
    34  workload captured via perf sched record. Columns stand for

D | perf-timechart.txt |
     6  perf-timechart - Tool to visualize total system behavior during a workload
    18  of an arbitrary workload. By default timechart records only scheduler

D | examples.txt |
    45  'repeat the workload N times' feature of perf stat:
    65  Furthermore, these tracepoints can be used to sample the workload as

D | perf-kmem.txt |
    18  of an arbitrary workload.

D | perf-kvm.txt |
    24  of an arbitrary workload.
    27  of an arbitrary workload and save it into a perf data file. We set the

D | perf-stat.txt |
   264  if the workload is actually bound by the CPU and not by something else.
   278  on workload with changing phases.
   283  CPUs the workload runs on. If needed the CPUs can be forced using
   341  For workload sessions we also display time the workloads spent in

D | intel-pt.txt |
   565  workload only
   568  workload).
   570  "workload only" mode is selected by not using the other options but providing a
   571  command to run (i.e. the workload).
   580  In workload-only mode, the workload is traced but with per-cpu buffers.
   581  Inheritance is allowed. Note that you can now trace a workload in per-thread
   606  information to decode Intel PT in per-cpu mode, and potentially workload-only
   607  mode too if the workload creates new processes.

D | tips.txt |
     7  Save output of perf stat using: perf stat record <target workload>

/Linux-v4.19/Documentation/admin-guide/mm/

D | idle_page_tracking.rst |
    11  accessed by a workload and which are idle. This information can be useful for
    12  estimating the workload's working set size, which, in turn, can be taken into
    13  account when configuring the workload parameters, setting memory cgroup limits,
    14  or deciding where to place the workload within a compute cluster.
    53  workload one should:
    55  1. Mark all the workload's pages as idle by setting corresponding bits in
    57  ``/proc/pid/pagemap`` if the workload is represented by a process, or by
    58  filtering out alien pages using ``/proc/kpagecgroup`` in case the workload
    61  2. Wait until the workload accesses its working set.
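The excerpt above is the working-set estimation recipe: mark the workload's pages idle through /sys/kernel/mm/page_idle/bitmap, let the workload run, then see which idle bits were cleared. Below is a minimal, hypothetical C sketch of that loop for a single page; it assumes root privileges (CAP_SYS_ADMIN), a kernel built with CONFIG_IDLE_PAGE_TRACKING=y, and takes the workload pid and one virtual address as illustrative command-line arguments.

/*
 * Hypothetical sketch: mark one page of the workload idle, let the
 * workload run, then check whether the idle bit survived.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

#define PAGEMAP_PFN_MASK	((1ULL << 55) - 1)	/* bits 0-54 hold the PFN */

static uint64_t vaddr_to_pfn(pid_t pid, unsigned long vaddr)
{
	char path[64];
	uint64_t entry = 0;
	long psize = sysconf(_SC_PAGESIZE);
	int fd;

	snprintf(path, sizeof(path), "/proc/%d/pagemap", pid);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 0;
	/* one 64-bit pagemap entry per virtual page */
	pread(fd, &entry, sizeof(entry), (vaddr / psize) * sizeof(entry));
	close(fd);
	/* bit 63 = page present; the PFN is only valid for present pages */
	return (entry >> 63) ? (entry & PAGEMAP_PFN_MASK) : 0;
}

int main(int argc, char **argv)
{
	pid_t pid;
	unsigned long vaddr;
	uint64_t pfn, word;
	int fd;

	if (argc < 3)
		return 1;
	pid = atoi(argv[1]);			/* the workload */
	vaddr = strtoul(argv[2], NULL, 0);	/* one page of its memory */

	fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
	pfn = vaddr_to_pfn(pid, vaddr);
	if (fd < 0 || !pfn)
		return 1;

	/* step 1: each 64-bit word of the bitmap covers 64 page frames */
	word = 1ULL << (pfn % 64);
	pwrite(fd, &word, sizeof(word), (pfn / 64) * sizeof(word));

	sleep(10);				/* step 2: let the workload run */

	/* step 3: a still-set bit means the page was not accessed */
	pread(fd, &word, sizeof(word), (pfn / 64) * sizeof(word));
	printf("page %#lx was %s\n", vaddr,
	       (word & (1ULL << (pfn % 64))) ? "not accessed" : "accessed");
	close(fd);
	return 0;
}

For a whole workload, the same idea is applied over every mapped page found through /proc/<pid>/pagemap rather than a single address.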
/Linux-v4.19/Documentation/filesystems/nfs/

D | knfsd-stats.txt |
    52  Depending on the NFS workload patterns and various network stack
    56  However this is a more accurate and less workload-dependent measure
    72  pool for the NFS workload (the workload is thread-limited), in which
    74  performance of the NFS workload.
    91  threads configured than can be used by the NFS workload. This is
    97  slow; the idle timeout is 60 minutes. Unless the NFS workload

/Linux-v4.19/drivers/gpu/drm/amd/powerplay/hwmgr/

D | pp_psm.c |
   263  long workload; in psm_adjust_power_state_dynamic() local
   289  workload = hwmgr->workload_setting[index]; in psm_adjust_power_state_dynamic()
   291  if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode) in psm_adjust_power_state_dynamic()
   292  hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0); in psm_adjust_power_state_dynamic()

/Linux-v4.19/tools/perf/util/

D | evlist.c |
    50  evlist->workload.pid = -1; in perf_evlist__init()
  1445  evlist->workload.pid = fork(); in perf_evlist__prepare_workload()
  1446  if (evlist->workload.pid < 0) { in perf_evlist__prepare_workload()
  1451  if (!evlist->workload.pid) { in perf_evlist__prepare_workload()
  1515  thread_map__set_pid(evlist->threads, 0, evlist->workload.pid); in perf_evlist__prepare_workload()
  1529  evlist->workload.cork_fd = go_pipe[1]; in perf_evlist__prepare_workload()
  1544  if (evlist->workload.cork_fd > 0) { in perf_evlist__start_workload()
  1550  ret = write(evlist->workload.cork_fd, &bf, 1); in perf_evlist__start_workload()
  1554  close(evlist->workload.cork_fd); in perf_evlist__start_workload()
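These evlist.c hits show perf's two-step workload launch: perf_evlist__prepare_workload() forks the command and parks the child on the read end of a pipe, keeping the write end as workload.cork_fd, while perf_evlist__start_workload() later writes a single byte to release it. The sketch below is a stripped-down, hypothetical illustration of that cork pattern; the go_pipe name follows the snippet above, but the use of sleep as the workload is arbitrary.

/*
 * Minimal sketch of the cork pattern (illustrative, not perf's code):
 * fork the workload, park the child on the read end of a pipe, and
 * release it with a single one-byte write once the parent is ready.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int go_pipe[2];			/* go_pipe[1] plays the role of cork_fd */
	char bf = 0;
	pid_t pid;

	if (pipe(go_pipe) < 0)
		return 1;

	pid = fork();
	if (pid < 0)
		return 1;

	if (pid == 0) {				/* child: the workload */
		close(go_pipe[1]);
		read(go_pipe[0], &bf, 1);	/* block until the parent says go */
		execlp("sleep", "sleep", "1", (char *)NULL);
		_exit(127);			/* exec failed */
	}

	/* parent: events would be opened and enabled here, workload still parked */
	printf("releasing workload pid %d\n", pid);
	write(go_pipe[1], &bf, 1);		/* pop the cork */
	close(go_pipe[1]);
	waitpid(pid, NULL, 0);
	return 0;
}

Parking the child behind the pipe gives the parent a window to finish its setup before the workload executes its first instruction, which is what a measurement tool needs.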
/Linux-v4.19/Documentation/timers/

D | NO_HZ.txt |
    49  However, if you are instead running a light workload with long idle
    56  In addition, if you are running either a real-time workload or an HPC
    57  workload with short iterations, the scheduling-clock interrupts can
    58  degrade your applications performance. If this describes your workload,
   205  but do not see any change in your workload's behavior. Is this because
   206  your workload isn't affected that much by OS jitter, or is it because
   217  possible, then you can conclude that your workload is not all that
   292  constraining the workload. For example, the only way to eliminate

/Linux-v4.19/Documentation/

D | kernel-per-CPU-kthreads.txt |
    31  # run workload
   230  1. Run your workload at a real-time priority, which will allow
   270  slowly. Of course, you can also run your workload at
   272  but if your workload is CPU-bound, this is a bad idea.
   310  is feasible only if your workload never requires RCU priority
   356  timer down to a level that is acceptable for your workload.

/Linux-v4.19/Documentation/block/

D | bfq-iosched.txt |
    68  Regardless of the actual background workload, BFQ guarantees that, for
    82  until the background workload terminates (also on SSDs).
    88  of the background I/O workload. As a consequence, these applications
    89  do not suffer from almost any glitch due to the background workload.
    93  If some additional workload happens to be executed in parallel, then
   110  workload and regardless of the device parameters. From these bandwidth
   227  workload and the budgets assigned to the queue.
   326  So depending on storage and workload, it might be useful to set
   349  throughput. One important case is random workload. Because of this

/Linux-v4.19/drivers/gpu/drm/amd/powerplay/

D | amd_powerplay.c |
   890  long workload; in pp_dpm_switch_power_profile() local
   910  workload = hwmgr->workload_setting[index]; in pp_dpm_switch_power_profile()
   915  workload = hwmgr->workload_setting[index]; in pp_dpm_switch_power_profile()
   919  hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0); in pp_dpm_switch_power_profile()

/Linux-v4.19/Documentation/acpi/

D | cppc_sysfs.txt |
    63  Wait or run some workload