/Linux-v6.1/tools/testing/selftests/cgroup/ |
D | memcg_protection.m |
     6 % hierarchy to illustrate how overcommitted protection spreads among siblings
     9 % Simulation assumes siblings consumed the initial amount of memory (w/out
    16 % n vector nominal protection of siblings set at the given level (memory.low)
    48 siblings = sum(u); variable
    52 e = protected * min(1, E / siblings); % normalize overcommit
    55 unclaimed = max(0, E - siblings);
    56 parent_overuse = sum(c) - siblings;
    79 % XXX here I do parallel reclaim of all siblings
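The script models how a parent's effective protection E is divided when its children's memory.low claims overcommit it. Below is a minimal standalone C sketch of just the normalization step on line 52; the variable names mirror the script, but the numbers and the three-sibling scenario are made up for illustration:

    /* Sketch: e = protected * min(1, E / siblings), where 'siblings' is the
     * summed claim of all children (sum(u) on line 48). Illustrative only. */
    #include <stdio.h>

    int main(void)
    {
        double u[] = { 50.0, 30.0, 40.0 }; /* per-sibling protected claim */
        double E = 100.0;                  /* parent's effective protection */
        double siblings = 0.0;
        int i;

        for (i = 0; i < 3; i++)
            siblings += u[i];              /* siblings = sum(u) */

        for (i = 0; i < 3; i++) {
            double scale = E / siblings < 1.0 ? E / siblings : 1.0;
            double e = u[i] * scale;       /* e = protected * min(1, E/siblings) */
            printf("sibling %d: claims %.0f, gets %.1f\n", i, u[i], e);
        }
        return 0;
    }

With claims summing to 120 against E = 100, each sibling's protection is scaled by 5/6, which is the proportional spread of overcommitted protection the script's comments describe.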
|
/Linux-v6.1/Documentation/admin-guide/hw-vuln/ |
D | core-scheduling.rst |
    100 siblings of a core such that all the selected tasks running on a core are
    107 the sibling has the task enqueued. For rest of the siblings in the core,
    112 Once a task has been selected for all the siblings in the core, an IPI is sent to
    113 siblings for whom a new task was selected. Siblings on receiving the IPI will
    125 siblings could be forced to select a lower priority task if the highest
    157 and are considered system-wide trusted. The forced-idling of siblings running
    174 the siblings to switch to the new task. But there could be hardware delays in
    176 cause an attacker task to start running on a CPU before its siblings receive the
    177 IPI. Even though cache is flushed on entry to user mode, victim tasks on siblings
    185 Core scheduling cannot protect against MDS attacks between the siblings
    [all …]
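The userspace interface this document describes is prctl(PR_SCHED_CORE). A hedged usage sketch, assuming a CONFIG_SCHED_CORE kernel (v5.14 or later); the fallback defines mirror include/uapi/linux/prctl.h for older userspace headers:

    #include <sys/prctl.h>
    #include <stdio.h>

    #ifndef PR_SCHED_CORE
    #define PR_SCHED_CORE                    62
    #define PR_SCHED_CORE_CREATE              1
    #define PR_SCHED_CORE_SCOPE_THREAD_GROUP  1
    #endif

    int main(void)
    {
        /* Create a new core-scheduling cookie for this whole thread group:
         * from now on, only tasks sharing the cookie may run on this task's
         * SMT siblings; untrusted tasks force the siblings idle instead. */
        if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0 /* self */,
                  PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0)) {
            perror("PR_SCHED_CORE_CREATE");
            return 1;
        }
        puts("core-scheduling cookie installed");
        return 0;
    }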
|
/Linux-v6.1/drivers/infiniband/hw/irdma/ |
D | ws.c |
    133 list_for_each_entry(node, &parent->child_list_head, siblings) { in ws_find_node()
    139 list_for_each_entry(node, &parent->child_list_head, siblings) { in ws_find_node()
    214 list_del(&tc_node->siblings); in irdma_remove_leaf()
    219 list_del(&vsi_node->siblings); in irdma_remove_leaf()
    295 list_add(&vsi_node->siblings, &ws_tree_root->child_list_head); in irdma_ws_add()
    322 list_add(&tc_node->siblings, &vsi_node->child_list_head); in irdma_ws_add()
    356 list_del(&tc_node->siblings); in irdma_ws_add()
    362 list_del(&vsi_node->siblings); in irdma_ws_add()
|
D | ws.h | 19 struct list_head siblings; member
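Both irdma files use the kernel's intrusive-list idiom: each tree node links itself into its parent's child_list_head through its own embedded 'siblings' list_head. A userspace mimic of the pattern, with simplified re-implementations of list_add()/list_del()/container_of() standing in for the real <linux/list.h>:

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_init(struct list_head *h) { h->next = h->prev = h; }
    static void list_add(struct list_head *n, struct list_head *h)
    {
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
    }
    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next; n->next->prev = n->prev;
        n->next = n->prev = NULL;
    }
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ws_node {
        int tc;
        struct list_head siblings;        /* links into parent's child list */
        struct list_head child_list_head; /* heads this node's own children */
    };

    int main(void)
    {
        struct ws_node root, a = { .tc = 1 }, b = { .tc = 2 };
        struct list_head *pos;

        list_init(&root.child_list_head);
        list_add(&a.siblings, &root.child_list_head);
        list_add(&b.siblings, &root.child_list_head);

        /* Walk the children, as ws_find_node() walks parent->child_list_head. */
        for (pos = root.child_list_head.next; pos != &root.child_list_head;
             pos = pos->next)
            printf("child tc=%d\n", container_of(pos, struct ws_node, siblings)->tc);

        list_del(&a.siblings); /* unlink one child, as irdma_remove_leaf() does */
        return 0;
    }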
|
/Linux-v6.1/drivers/gpu/drm/i915/gt/uc/ |
D | selftest_guc_multi_lrc.c |
    33 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; in multi_lrc_create_parent() local
    42 siblings[i++] = engine; in multi_lrc_create_parent()
    48 logical_sort(siblings, i); in multi_lrc_create_parent()
    50 return intel_engine_create_parallel(siblings, 1, i); in multi_lrc_create_parent()
|
/Linux-v6.1/drivers/gpu/drm/i915/gem/ |
D | i915_gem_context.c |
    205 kfree(pc->user_engines[i].siblings); in proto_context_close()
    404 struct intel_engine_cs **siblings; in set_proto_ctx_engines_balance() local
    442 siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL); in set_proto_ctx_engines_balance()
    443 if (!siblings) in set_proto_ctx_engines_balance()
    454 siblings[n] = intel_engine_lookup_user(i915, in set_proto_ctx_engines_balance()
    457 if (!siblings[n]) { in set_proto_ctx_engines_balance()
    468 set->engines[idx].engine = siblings[0]; in set_proto_ctx_engines_balance()
    469 kfree(siblings); in set_proto_ctx_engines_balance()
    473 set->engines[idx].siblings = siblings; in set_proto_ctx_engines_balance()
    479 kfree(siblings); in set_proto_ctx_engines_balance()
    [all …]
|
D | i915_gem_context_types.h |
    102 * i915_gem_proto_engine::num_siblings and i915_gem_proto_engine::siblings.
    106 * i915_gem_proto_engine::siblings.
    115 /** @num_siblings: Number of balanced or parallel siblings */
    121 /** @siblings: Balanced siblings or num_siblings * width for parallel */
    122 struct intel_engine_cs **siblings; member
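The matches in set_proto_ctx_engines_balance() show a common ownership pattern: allocate a siblings[] array, populate it via lookups, free it on any failure, and hand it off (to be freed at context close) on success. A userspace sketch of the same lifecycle; struct engine and lookup_engine() are illustrative stand-ins for intel_engine_cs and intel_engine_lookup_user():

    #include <stdio.h>
    #include <stdlib.h>

    struct engine { int class, instance; };

    static struct engine pool[] = { {0, 0}, {0, 1}, {0, 2}, {0, 3} };

    /* Hypothetical stand-in for intel_engine_lookup_user(). */
    static struct engine *lookup_engine(int class, unsigned int instance)
    {
        return instance < 4 ? &pool[instance] : NULL;
    }

    /* Exactly one path owns the array at any time: freed here on failure,
     * owned (and later freed) by the caller on success. */
    static struct engine **build_siblings(unsigned int num_siblings)
    {
        struct engine **siblings = calloc(num_siblings, sizeof(*siblings));
        unsigned int n;

        if (!siblings)
            return NULL;

        for (n = 0; n < num_siblings; n++) {
            siblings[n] = lookup_engine(0, n);
            if (!siblings[n]) {
                free(siblings);
                return NULL;
            }
        }
        return siblings;
    }

    int main(void)
    {
        struct engine **siblings = build_siblings(4);

        if (!siblings)
            return 1;
        printf("balanced across 4 siblings, first is %d:%d\n",
               siblings[0]->class, siblings[0]->instance);
        free(siblings); /* cf. kfree() of user_engines[i].siblings at close */
        return 0;
    }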
|
/Linux-v6.1/drivers/gpu/drm/i915/gt/ |
D | selftest_execlists.c |
    3726 struct intel_engine_cs **siblings, in nop_virtual_engine() argument
    3743 ve[n] = intel_engine_create_virtual(siblings, nsibling, 0); in nop_virtual_engine()
    3854 struct intel_engine_cs **siblings, in __select_siblings() argument
    3867 siblings[n++] = gt->engine_class[class][inst]; in __select_siblings()
    3876 struct intel_engine_cs **siblings) in select_siblings() argument
    3878 return __select_siblings(gt, class, siblings, NULL); in select_siblings()
    3884 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; in live_virtual_engine() local
    3905 nsibling = select_siblings(gt, class, siblings); in live_virtual_engine()
    3910 err = nop_virtual_engine(gt, siblings, nsibling, in live_virtual_engine()
    3916 err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN); in live_virtual_engine()
    [all …]
|
D | intel_execlists_submission.c |
    197 struct intel_engine_cs *siblings[]; member
    207 execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
    637 * We have to kick all the siblings again in case we need to in __execlists_schedule_out()
    1053 if (likely(engine == ve->siblings[0])) in virtual_xfer_context()
    1067 if (ve->siblings[n] == engine) { in virtual_xfer_context()
    1068 swap(ve->siblings[n], ve->siblings[0]); in virtual_xfer_context()
    1442 str_yes_no(engine != ve->siblings[0])); in execlists_dequeue()
    1464 * ve->siblings[] on an idle context, where in execlists_dequeue()
    1465 * we may be using ve->siblings[] in in execlists_dequeue()
    1469 GEM_BUG_ON(ve->siblings[0] != engine); in execlists_dequeue()
    [all …]
|
/Linux-v6.1/drivers/gpio/ |
D | gpio-sim.c |
    556 struct list_head siblings; member
    590 struct list_head siblings; member
    694 list_for_each_entry(line, &bank->line_list, siblings) { in gpio_sim_make_line_names()
    720 list_for_each_entry(line, &bank->line_list, siblings) in gpio_sim_make_line_names()
    751 list_for_each_entry(bank, &dev->bank_list, siblings) { in gpio_sim_add_hogs()
    752 list_for_each_entry(line, &bank->line_list, siblings) { in gpio_sim_add_hogs()
    766 list_for_each_entry(bank, &dev->bank_list, siblings) { in gpio_sim_add_hogs()
    767 list_for_each_entry(line, &bank->line_list, siblings) { in gpio_sim_add_hogs()
    862 list_for_each_entry(this, &dev->bank_list, siblings) { in gpio_sim_bank_labels_non_unique()
    863 list_for_each_entry(pos, &dev->bank_list, siblings) { in gpio_sim_bank_labels_non_unique()
    [all …]
|
/Linux-v6.1/include/uapi/linux/ |
D | membarrier.h |
     70 * threads siblings have passed through a state
     94 * call, that all its running threads siblings
    120 * siblings have any currently running rseq
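These comments document membarrier(2). A minimal caller, sketched with a raw syscall since glibc provides no wrapper; the PRIVATE_EXPEDITED commands need v4.14 or later:

    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdio.h>

    static int membarrier(int cmd, unsigned int flags, int cpu_id)
    {
        return syscall(__NR_membarrier, cmd, flags, cpu_id);
    }

    int main(void)
    {
        /* A process must register before using the private expedited command. */
        if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0)) {
            perror("register");
            return 1;
        }

        /* On return, all running sibling threads of this process have passed
         * through a state where memory order matches program order. */
        if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0)) {
            perror("membarrier");
            return 1;
        }

        puts("barrier completed");
        return 0;
    }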
|
/Linux-v6.1/kernel/ |
D | Kconfig.preempt |
    123 selection across SMT siblings. When enabled -- see
    124 prctl(PR_SCHED_CORE) -- task selection ensures that all SMT siblings
|
/Linux-v6.1/tools/perf/util/ |
D | dwarf-aux.h |
    69 DIE_FIND_CB_SIBLING = 2, /* Search only siblings */
    70 DIE_FIND_CB_CONTINUE = 3, /* Search children and siblings */
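The DIE_FIND_CB_* values form a traversal protocol: the callback's return tells the walker whether to stop, descend into children, advance to siblings, or both (CONTINUE = CHILD | SIBLING). An illustrative re-creation over a toy tree rather than libdw's Dwarf_Die; the values and their bit meanings match dwarf-aux.h:

    #include <stdio.h>

    enum find_cb {
        FIND_CB_END = 0,      /* match found, stop */
        FIND_CB_CHILD = 1,    /* search only children */
        FIND_CB_SIBLING = 2,  /* search only siblings */
        FIND_CB_CONTINUE = 3, /* search children and siblings */
    };

    struct node {
        const char *name;
        struct node *child, *sibling;
    };

    static struct node *find_node(struct node *n, enum find_cb (*cb)(struct node *))
    {
        while (n) {
            enum find_cb act = cb(n);

            if (act == FIND_CB_END)
                return n;
            if (act & FIND_CB_CHILD) {        /* CHILD and CONTINUE descend */
                struct node *hit = find_node(n->child, cb);
                if (hit)
                    return hit;
            }
            if (!(act & FIND_CB_SIBLING))     /* SIBLING and CONTINUE advance */
                break;
            n = n->sibling;
        }
        return NULL;
    }

    static enum find_cb want_target(struct node *n)
    {
        return n->name[0] == 'x' ? FIND_CB_END : FIND_CB_CONTINUE;
    }

    int main(void)
    {
        struct node leaf = { "x-target", NULL, NULL };
        struct node mid  = { "mid", &leaf, NULL };
        struct node root = { "root", &mid, NULL };
        struct node *hit = find_node(&root, want_target);

        printf("found: %s\n", hit ? hit->name : "(none)");
        return 0;
    }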
|
/Linux-v6.1/drivers/nvme/host/ |
D | multipath.c |
    180 list_for_each_entry_rcu(ns, &head->list, siblings) { in nvme_mpath_revalidate_paths()
    212 list_for_each_entry_rcu(ns, &head->list, siblings) { in __nvme_find_path()
    249 ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns, in nvme_next_ns()
    250 siblings); in nvme_next_ns()
    253 return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings); in nvme_next_ns()
    325 list_for_each_entry_rcu(ns, &head->list, siblings) { in nvme_available_path()
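nvme_next_ns() expresses "take the sibling after the current one, or wrap to the first" with list_next_or_null_rcu() falling back to list_first_or_null_rcu(). A sketch of that round-robin path selection reduced to a plain array, with the RCU protection elided:

    #include <stdio.h>

    struct ns { int id; };

    /* Next sibling or wrap to the first: the nvme_next_ns() shape. */
    static int next_path(int current, int npaths)
    {
        return (current + 1) % npaths;
    }

    int main(void)
    {
        struct ns paths[] = { {1}, {2}, {3} }; /* sibling paths to one shared namespace */
        int cur = 0, i;

        for (i = 0; i < 6; i++) {
            cur = next_path(cur, 3);
            printf("I/O %d -> path nsid %d\n", i, paths[cur].id);
        }
        return 0;
    }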
|
/Linux-v6.1/drivers/iommu/ |
D | fsl_pamu_domain.h | 22 struct list_head link; /* link to domain siblings */
|
/Linux-v6.1/arch/x86/power/ |
D | hibernate.c |
    194 * that SMT siblings are sleeping in hlt, as mwait is not safe in arch_resume_nosmt()
    199 * SMT siblings out of hlt, and offline them again so that they in arch_resume_nosmt()
|
/Linux-v6.1/arch/sparc/include/asm/ |
D | oplib_32.h |
    118 * siblings exist.
    145 /* Search all siblings starting at the passed node for "name" matching
|
/Linux-v6.1/Documentation/devicetree/bindings/display/mediatek/ |
D | mediatek,od.yaml | 16 OD device node must be siblings to the central MMSYS_CONFIG node.
|
D | mediatek,split.yaml | 16 SPLIT device node must be siblings to the central MMSYS_CONFIG node.
|
D | mediatek,ufoe.yaml | 17 UFOe device node must be siblings to the central MMSYS_CONFIG node.
|
/Linux-v6.1/arch/alpha/kernel/ |
D | gct.c | 39 /* Now walk the tree, siblings first. */ in gct6_find_nodes()
|
/Linux-v6.1/arch/powerpc/platforms/pseries/ |
D | smp.c |
    206 /* Doorbells can only be used for IPIs between SMT siblings */ in pSeries_smp_probe()
    232 * Under PowerVM, FSCR[MSGP] is enabled as guest vCPU siblings are in pSeries_smp_probe()
|
/Linux-v6.1/arch/mips/mm/ |
D | context.c |
    239 * If this CPU shares FTLB entries with its siblings and one or more of in check_switch_mmu_context()
    240 * those siblings hasn't yet invalidated its TLB following a version in check_switch_mmu_context()
|
/Linux-v6.1/arch/x86/kernel/ |
D | tsc_sync.c |
    328 * If the target CPU coming online doesn't have any of its core-siblings
    332 * have more and more logical-siblings in that socket).
    335 * core-siblings, if the first logical CPU in a socket passed the sync test.
|
/Linux-v6.1/Documentation/admin-guide/pm/ |
D | intel_epb.rst | 40 example, SMT siblings or cores in one package). For this reason, updating the
|