/Linux-v6.6/kernel/irq/ |
D | cpuhotplug.c |
  19  /* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
  28  * which do not implement effective affinity, but the architecture has in irq_needs_fixup()
  29  * enabled the config switch. Use the general affinity mask instead. in irq_needs_fixup()
  45  pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n", in irq_needs_fixup()
  58  const struct cpumask *affinity; in migrate_one_irq() local
  64  * still in the radix tree. Also if the chip has no affinity setter, in migrate_one_irq()
  76  * - Affinity mask does not include this CPU. in migrate_one_irq()
  100  * mask, so the last change of the affinity does not get lost. If in migrate_one_irq()
  102  * any online CPU, use the current affinity mask. in migrate_one_irq()
  105  affinity = irq_desc_get_pending_mask(desc); in migrate_one_irq()
  [all …]
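The matches above are the core of the CPU-hotplug fixup path: irq_needs_fixup() decides whether an interrupt still targets the outgoing CPU, and migrate_one_irq() picks a new target, preferring a pending affinity change and falling back when no online CPU remains. A minimal sketch of that decision logic using only the generic cpumask helpers (the function names and the simplified flow below are my own, not the kernel's exact code):

    #include <linux/cpumask.h>
    #include <linux/irq.h>

    /*
     * Illustrative paraphrase (a deliberate simplification): an interrupt only
     * needs fixing up on CPU offline if the outgoing CPU is part of the mask
     * the hardware is currently programmed with.
     */
    static bool example_irq_targets_cpu(struct irq_data *d, unsigned int dying_cpu)
    {
    	return cpumask_test_cpu(dying_cpu,
    				irq_data_get_effective_affinity_mask(d));
    }

    /*
     * Sketch of the fallback migrate_one_irq() makes when the chosen mask no
     * longer contains any online CPU: break affinity and target all online CPUs.
     */
    static const struct cpumask *example_pick_target(const struct cpumask *affinity)
    {
    	if (!cpumask_intersects(affinity, cpu_online_mask))
    		return cpu_online_mask;
    	return affinity;
    }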
|
D | irqdesc.c |
  56  if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity, in alloc_masks()
  63  free_cpumask_var(desc->irq_common_data.affinity); in alloc_masks()
  73  free_cpumask_var(desc->irq_common_data.affinity); in alloc_masks()
  81  const struct cpumask *affinity) in desc_smp_init() argument
  83  if (!affinity) in desc_smp_init()
  84  affinity = irq_default_affinity; in desc_smp_init()
  85  cpumask_copy(desc->irq_common_data.affinity, affinity); in desc_smp_init()
  99  desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { } in desc_smp_init() argument
  103  const struct cpumask *affinity, struct module *owner) in desc_set_defaults() argument
  126  desc_smp_init(desc, node, affinity); in desc_set_defaults()
  [all …]
|
D | affinity.c |
  19  * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
  21  * @affd: Description of the affinity requirements
  63  /* Fill out vectors at the beginning that don't need affinity */ in irq_create_affinity_masks()
  69  * have multiple sets, build each sets affinity mask separately. in irq_create_affinity_masks()
  89  /* Fill out vectors at the end that don't need affinity */ in irq_create_affinity_masks()
  108  * @affd: Description of the affinity requirements
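irq_create_affinity_masks() turns an irq_affinity description into one irq_affinity_desc per vector, leaving the @pre_vectors and @post_vectors entries unmanaged. A rough sketch of a caller, based only on the kerneldoc quoted above (the wrapper function and the printout are hypothetical):

    #include <linux/interrupt.h>
    #include <linux/slab.h>

    /* Assumed caller: produce and inspect the spread masks, one per vector. */
    static int example_spread(unsigned int nvecs, struct irq_affinity *affd)
    {
    	struct irq_affinity_desc *masks;
    	unsigned int i;

    	masks = irq_create_affinity_masks(nvecs, affd);
    	if (!masks)
    		return -ENOMEM;

    	for (i = 0; i < nvecs; i++)
    		pr_info("vector %u -> CPUs %*pbl (managed: %u)\n",
    			i, cpumask_pr_args(&masks[i].mask), masks[i].is_managed);

    	kfree(masks);
    	return 0;
    }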
|
/Linux-v6.6/Documentation/arch/arm64/ |
D | asymmetric-32bit.rst |
  51  CPU affinity.
  68  On a homogeneous system, the CPU affinity of a task is preserved across
  71  affinity mask contains 64-bit-only CPUs. In this situation, the kernel
  72  determines the new affinity mask as follows:
  74  1. If the 32-bit-capable subset of the affinity mask is not empty,
  75  then the affinity is restricted to that subset and the old affinity
  84  affinity of the task is then changed to match the 32-bit-capable
  87  3. On failure (i.e. out of memory), the affinity is changed to the set
  91  invalidate the affinity mask saved in (1) and attempt to restore the CPU
  92  affinity of the task using the saved mask if it was previously valid.
  [all …]
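Step 1 above is an intersection of the task's affinity mask with the 32-bit-capable set. A small userspace illustration of that intersection using the glibc CPU_* macros (the choice of CPUs 0-3 as the 32-bit-capable ones is purely hypothetical):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
    	cpu_set_t affinity, capable32, subset;

    	/* Hypothetical: pretend CPUs 0-3 are the 32-bit-capable ones. */
    	CPU_ZERO(&capable32);
    	for (int i = 0; i < 4; i++)
    		CPU_SET(i, &capable32);

    	sched_getaffinity(0, sizeof(affinity), &affinity);

    	/* Step 1 above: restrict to the 32-bit-capable subset if non-empty. */
    	CPU_AND(&subset, &affinity, &capable32);
    	if (CPU_COUNT(&subset))
    		printf("restricted mask has %d CPU(s)\n", CPU_COUNT(&subset));
    	else
    		printf("empty intersection: kernel falls back (steps 2/3)\n");
    	return 0;
    }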
|
/Linux-v6.6/lib/ |
D | cpu_rmap.c |
  3  * cpu_rmap.c: CPU affinity reverse-map support
  14  * CPU affinity. However, we do not assume that the object affinities
  21  * alloc_cpu_rmap - allocate CPU affinity reverse-map
  162  * cpu_rmap_update - update CPU rmap following a change of object affinity
  164  * @index: Index of object whose affinity changed
  165  * @affinity: New CPU affinity of object
  168  const struct cpumask *affinity) in cpu_rmap_update() argument
  188  /* Set distance to 0 for all CPUs in the new affinity mask. in cpu_rmap_update()
  191  for_each_cpu(cpu, affinity) { in cpu_rmap_update()
  223  /* Glue between IRQ affinity notifiers and CPU rmaps */
  [all …]
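A rough usage sketch of the reverse-map API, based on the kerneldoc quoted above (the two-queue layout, the lookup, and the cpu_rmap_put() cleanup call are assumptions, not taken from a real driver):

    #include <linux/cpu_rmap.h>
    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/printk.h>

    /* Assumed example: map each CPU to the "nearest" of two queue objects. */
    static int example_rmap(void)
    {
    	static int queue0, queue1;
    	struct cpu_rmap *rmap;
    	int idx0, idx1;

    	rmap = alloc_cpu_rmap(2, GFP_KERNEL);
    	if (!rmap)
    		return -ENOMEM;

    	idx0 = cpu_rmap_add(rmap, &queue0);
    	idx1 = cpu_rmap_add(rmap, &queue1);

    	/* Tell the rmap which CPU each object is affine to. */
    	cpu_rmap_update(rmap, idx0, cpumask_of(0));
    	cpu_rmap_update(rmap, idx1, cpumask_of(1));

    	/* The nearest object for any other CPU follows from topology distance. */
    	pr_info("CPU3 -> object index %u\n", cpu_rmap_lookup_index(rmap, 3));

    	cpu_rmap_put(rmap);
    	return 0;
    }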
|
/Linux-v6.6/tools/perf/util/ |
D | affinity.c |
  2  /* Manage affinity to optimize IPIs inside the kernel perf API. */
  10  #include "affinity.h"
  24  int affinity__setup(struct affinity *a) in affinity__setup()
  44  * It is more efficient to change perf's affinity to the target
  48  void affinity__set(struct affinity *a, int cpu) in affinity__set()
  63  * We ignore errors because affinity is just an optimization. in affinity__set()
  71  static void __affinity__cleanup(struct affinity *a) in __affinity__cleanup()
  81  void affinity__cleanup(struct affinity *a) in affinity__cleanup()
|
D | affinity.h |
  7  struct affinity { struct
  13  void affinity__cleanup(struct affinity *a); argument
  14  void affinity__set(struct affinity *a, int cpu);
  15  int affinity__setup(struct affinity *a);
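Putting the three declarations together, a simplified usage pattern looks like the following (an assumed example that presumes the tools/perf/util build context; perf's real consumer is the evlist__for_each_cpu iterator in the next entry):

    #include "affinity.h"

    static void example(const int *cpus, int ncpus)
    {
    	struct affinity affinity;
    	int i;

    	if (affinity__setup(&affinity) < 0)
    		return;

    	for (i = 0; i < ncpus; i++) {
    		/* Move perf onto the target CPU so the following syscalls avoid IPIs. */
    		affinity__set(&affinity, cpus[i]);
    		/* ... issue per-CPU perf_event operations here ... */
    	}

    	affinity__cleanup(&affinity);	/* restore the original mask */
    }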
|
D | evlist.h |
  198  int affinity, int flush, int comp_level);
  362  /** If present, used to set the affinity when switching between CPUs. */
  363  struct affinity *affinity; member
  367  * evlist__for_each_cpu - without affinity, iterate over the evlist. With
  368  * affinity, iterate over all CPUs and then the evlist
  370  * CPUs the affinity is set to the CPU to avoid IPIs
  374  * @affinity: NULL or used to set the affinity to the current CPU.
  376  #define evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) \ argument
  377  for ((evlist_cpu_itr) = evlist__cpu_begin(evlist, affinity); \
  382  struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity);
  [all …]
|
/Linux-v6.6/tools/virtio/ringtest/ |
D | run-on-all.sh |
  19  echo "GUEST AFFINITY $cpu"
  20  "$@" --host-affinity $HOST_AFFINITY --guest-affinity $cpu
  23  echo "NO GUEST AFFINITY"
  24  "$@" --host-affinity $HOST_AFFINITY
  25  echo "NO AFFINITY"
|
/Linux-v6.6/drivers/infiniband/hw/hfi1/ |
D | affinity.h |
  37  /* Initialize driver affinity data */
  40  * Set IRQ affinity to a CPU. The function will determine the
  41  * CPU and set the affinity to it.
  46  * Remove the IRQ's CPU affinity. This function also updates
  52  * Determine a CPU affinity for a user process, if the process does not
  53  * have an affinity set yet.
  77  struct mutex lock; /* protects affinity nodes */
|
D | affinity.c |
  12  #include "affinity.h"
  145  * The real cpu mask is part of the affinity struct but it has to be in node_affinity_init()
  386  hfi1_cdbg(AFFINITY, in _dev_comp_vect_mappings_destroy()
  436  hfi1_cdbg(AFFINITY, in _dev_comp_vect_mappings_create()
  512  … "Number of kernel receive queues is too large for completion vector affinity to be effective\n"); in _dev_comp_vect_cpu_mask_init()
  541  hfi1_cdbg(AFFINITY, in _dev_comp_vect_cpu_mask_init()
  542  "[%s] Completion vector affinity CPU set(s) %*pbl", in _dev_comp_vect_cpu_mask_init()
  581  * Interrupt affinity.
  606  * If this is the first time this NUMA node's affinity is used, in hfi1_dev_affinity_init()
  607  * create an entry in the global affinity structure and initialize it. in hfi1_dev_affinity_init()
  [all …]
|
/Linux-v6.6/tools/testing/selftests/rseq/ |
D | basic_test.c |
  18  cpu_set_t affinity, test_affinity; in test_cpu_pointer() local
  21  sched_getaffinity(0, sizeof(affinity), &affinity); in test_cpu_pointer()
  24  if (CPU_ISSET(i, &affinity)) { in test_cpu_pointer()
  39  sched_setaffinity(0, sizeof(affinity), &affinity); in test_cpu_pointer()
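The same pattern as the selftest, written as a standalone userspace program (an illustrative rewrite, not the rseq test itself): walk the current affinity mask, pin to each allowed CPU in turn, and report where the scheduler placed us.

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
    	cpu_set_t affinity, test_affinity;

    	if (sched_getaffinity(0, sizeof(affinity), &affinity))
    		return 1;

    	for (int i = 0; i < CPU_SETSIZE; i++) {
    		if (!CPU_ISSET(i, &affinity))
    			continue;
    		CPU_ZERO(&test_affinity);
    		CPU_SET(i, &test_affinity);
    		if (sched_setaffinity(0, sizeof(test_affinity), &test_affinity))
    			return 1;
    		printf("pinned to CPU %d, running on CPU %d\n", i, sched_getcpu());
    	}

    	/* Restore the original mask, as the selftest does at the end. */
    	return sched_setaffinity(0, sizeof(affinity), &affinity) ? 1 : 0;
    }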
|
/Linux-v6.6/Documentation/core-api/ |
D | workqueue.rst |
  350  Affinity Scopes
  353  An unbound workqueue groups CPUs according to its affinity scope to improve
  354  cache locality. For example, if a workqueue is using the default affinity
  361  Workqueue currently supports the following affinity scopes.
  379  cases. This is the default affinity scope.
  388  The default affinity scope can be changed with the module parameter
  389  ``workqueue.default_affinity_scope`` and a specific workqueue's affinity
  392  If ``WQ_SYSFS`` is set, the workqueue will have the following affinity scope
  397  Read to see the current affinity scope. Write to change.
  403  0 by default indicating that affinity scopes are not strict. When a work
  [all …]
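The sysfs knobs mentioned above only exist for workqueues created with WQ_SYSFS. A minimal sketch, assuming a module-init context (the workqueue name is hypothetical); once created, its affinity_scope and affinity_strict files appear under /sys/devices/virtual/workqueue/example_wq/:

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;

    static int example_init(void)
    {
    	/* Unbound + WQ_SYSFS: affinity scope becomes tunable from user space. */
    	example_wq = alloc_workqueue("example_wq", WQ_UNBOUND | WQ_SYSFS, 0);
    	return example_wq ? 0 : -ENOMEM;
    }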
|
/Linux-v6.6/tools/power/cpupower/bench/ |
D | system.c |
  67  * sets cpu affinity for the process
  69  * @param cpu cpu# to which the affinity should be set
  72  * @retval -1 when setting the affinity failed
  82  dprintf("set affinity to cpu #%u\n", cpu); in set_cpu_affinity()
  86  fprintf(stderr, "warning: unable to set cpu affinity\n"); in set_cpu_affinity()
  146  * sets up the cpu affinity and scheduler priority
  155  printf("set cpu affinity to cpu #%u\n", config->cpu); in prepare_system()
|
/Linux-v6.6/include/linux/ |
D | cpu_rmap.h |
  6  * cpu_rmap.c: CPU affinity reverse-map support
  16  * struct cpu_rmap - CPU affinity reverse-map
  21  * based on affinity masks
  39  const struct cpumask *affinity);
  52  * alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs
|
D | irq.h |
  59  * it from affinity setting
  66  * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
  117  * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity
  118  * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity
  138  * @affinity: IRQ affinity on SMP. If this is an IPI
  141  * @effective_affinity: The effective IRQ affinity on SMP as some irq
  143  * A subset of @affinity.
  145  * @ipi_offset: Offset of first IPI target cpu in @affinity. Optional.
  155  cpumask_var_t affinity; member
  196  * IRQD_SETAFFINITY_PENDING - Affinity setting is pending
  [all …]
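The IRQ_SET_MASK_* codes above define the contract between an irqchip's .irq_set_affinity callback and the core. A hypothetical callback sketched from that contract (not taken from a real driver): pick one online CPU from the requested mask, program the hardware, report the effective target, and let the core copy the mask into irq_common_data.affinity by returning IRQ_SET_MASK_OK.

    #include <linux/cpumask.h>
    #include <linux/irq.h>

    static int example_irq_set_affinity(struct irq_data *d,
    				    const struct cpumask *mask, bool force)
    {
    	unsigned int cpu = cpumask_first_and(mask, cpu_online_mask);

    	if (cpu >= nr_cpu_ids)
    		return -EINVAL;

    	/* ... write the per-IRQ target register for "cpu" here ... */

    	irq_data_update_effective_affinity(d, cpumask_of(cpu));
    	return IRQ_SET_MASK_OK;
    }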
|
D | interrupt.h |
  250  * struct irq_affinity_notify - context for notification of IRQ affinity changes
  272  * struct irq_affinity - Description for automatic irq affinity assignements
  273  * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
  275  * @post_vectors: Don't apply affinity to @post_vectors at end of
  277  * @nr_sets: The number of interrupt sets for which affinity
  295  * struct irq_affinity_desc - Interrupt affinity descriptor
  296  * @mask: cpumask to hold the affinity assignment
  318  * irq_update_affinity_hint - Update the affinity hint
  322  * Updates the affinity hint, but does not change the affinity of the interrupt.
  331  * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
  [all …]
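irq_update_affinity_hint() and irq_set_affinity_and_hint() are the driver-facing helpers behind the hint documented above. A sketch of a hypothetical driver spreading its queue vectors round-robin over the online CPUs and publishing the placement as a hint for user space such as irqbalance (the queue-per-vector layout is an assumption):

    #include <linux/cpumask.h>
    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static void example_hint_queues(struct pci_dev *pdev, unsigned int nr_queues)
    {
    	unsigned int i, cpu = cpumask_first(cpu_online_mask);

    	for (i = 0; i < nr_queues; i++) {
    		/* Apply the affinity and record it as the hint in one step. */
    		irq_set_affinity_and_hint(pci_irq_vector(pdev, i),
    					  cpumask_of(cpu));

    		cpu = cpumask_next(cpu, cpu_online_mask);
    		if (cpu >= nr_cpu_ids)
    			cpu = cpumask_first(cpu_online_mask);
    	}
    }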
|
/Linux-v6.6/Documentation/devicetree/bindings/interrupt-controller/ |
D | apple,aic.yaml |
  21  - Per-IRQ affinity setting
  77  FIQ affinity can be expressed as a single "affinities" node,
  79  affinity.
  81  "^.+-affinity$":
  88  the affinity is not the default.
|
/Linux-v6.6/drivers/md/ |
D | dm-ps-io-affinity.c |
  12  #define DM_MSG_PREFIX "multipath io-affinity"
  52  *error = "io-affinity ps: invalid number of arguments"; in ioa_add_path()
  58  *error = "io-affinity ps: Error allocating path context"; in ioa_add_path()
  67  *error = "io-affinity ps: Error allocating cpumask context"; in ioa_add_path()
  74  *error = "io-affinity ps: invalid cpumask"; in ioa_add_path()
  97  *error = "io-affinity ps: No new/valid CPU mapping found"; in ioa_add_path()
  239  .name = "io-affinity",
|
/Linux-v6.6/arch/mips/kernel/ |
D | mips-mt-fpaff.c |
  20  * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
  30  * FPU affinity with the user's requested processor affinity.
  63  * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
  155  * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
  214  printk(KERN_DEBUG "FPU Affinity set after %ld emulations\n", in mt_fp_affinity_init()
|
/Linux-v6.6/drivers/pci/msi/ |
D | api.c |
  139  * @affdesc: Optional pointer to an affinity descriptor structure. NULL otherwise
  219  * * %PCI_IRQ_AFFINITY Auto-manage IRQs affinity by spreading
  244  * vectors with affinity requirements
  249  * @affd: affinity requirements (can be %NULL).
  286  * Invoke the affinity spreading logic to ensure that in pci_alloc_irq_vectors_affinity()
  326  * pci_irq_get_affinity() - Get a device interrupt vector affinity
  335  * Return: MSI/MSI-X vector affinity, NULL if @nr is out of range or if
  336  * the MSI(-X) vector was allocated without explicit affinity
  355  /* MSI[X] interrupts can be allocated without affinity descriptor */ in pci_irq_get_affinity()
  356  if (!desc->affinity) in pci_irq_get_affinity()
  [all …]
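A sketch of the managed-affinity allocation path these kerneldoc lines describe, for a hypothetical driver with one admin vector plus one vector per queue (the function name and vector layout are assumptions):

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static int example_alloc_vectors(struct pci_dev *pdev, unsigned int nr_queues)
    {
    	struct irq_affinity affd = {
    		.pre_vectors = 1,	/* admin vector: not spread by the core */
    	};
    	const struct cpumask *mask;
    	int nvecs;

    	nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, nr_queues + 1,
    					       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
    					       &affd);
    	if (nvecs < 0)
    		return nvecs;

    	/* Where did the first queue vector land? */
    	mask = pci_irq_get_affinity(pdev, 1);
    	if (mask)
    		dev_info(&pdev->dev, "queue 0 IRQ affine to %*pbl\n",
    			 cpumask_pr_args(mask));
    	return nvecs;
    }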
|
/Linux-v6.6/arch/arm64/kernel/ |
D | setup.c |
  105  * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
  112  u32 i, affinity, fs[4], bits[4], ls; in smp_build_mpidr_hash() local
  116  * not contribute to affinity levels, ie they never toggle. in smp_build_mpidr_hash()
  122  * Find and stash the last and first bit set at all affinity levels to in smp_build_mpidr_hash()
  126  affinity = MPIDR_AFFINITY_LEVEL(mask, i); in smp_build_mpidr_hash()
  130  * to express the affinity level. in smp_build_mpidr_hash()
  132  ls = fls(affinity); in smp_build_mpidr_hash()
  133  fs[i] = affinity ? ffs(affinity) - 1 : 0; in smp_build_mpidr_hash()
  138  * significant bits at each affinity level and by shifting in smp_build_mpidr_hash()
|
/Linux-v6.6/drivers/gpu/drm/amd/amdkfd/ |
D | kfd_crat.h |
  89  * ComputeUnit Affinity structure and definitions
  124  * HSA Memory Affinity structure and definitions
  149  * HSA Cache Affinity structure and definitions
  178  * HSA TLB Affinity structure and definitions
  213  * HSA CCompute/APU Affinity structure and definitions
  232  * HSA IO Link Affinity structure and definitions
|
/Linux-v6.6/drivers/perf/ |
D | arm_pmu_platform.c |
  62  return !!of_find_property(node, "interrupt-affinity", NULL); in pmu_has_irq_affinity()
  71  * If we don't have an interrupt-affinity property, we guess irq in pmu_parse_irq_affinity()
  72  * affinity matches our logical CPU order, as we used to assume. in pmu_parse_irq_affinity()
  78  dn = of_parse_phandle(dev->of_node, "interrupt-affinity", i); in pmu_parse_irq_affinity()
  80  dev_warn(dev, "failed to parse interrupt-affinity[%d]\n", i); in pmu_parse_irq_affinity()
  124  dev_warn(dev, "no interrupt-affinity property, guessing.\n"); in pmu_parse_irqs()
|
/Linux-v6.6/Documentation/core-api/irq/ |
D | irq-affinity.rst |
  2  SMP IRQ affinity
  14  IRQ affinity then the value will not change from the default of all cpus.
  16  /proc/irq/default_smp_affinity specifies default affinity mask that applies
  17  to all non-active IRQs. Once IRQ is allocated/activated its affinity bitmask
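The smp_affinity files this document describes are plain procfs files, so they can be driven from any program. A small userspace illustration in C (IRQ number 32 and the mask value are hypothetical; root is required):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	/* Steer IRQ 32 to CPUs 0-3 by writing a hex bitmask. */
    	int fd = open("/proc/irq/32/smp_affinity", O_WRONLY);

    	if (fd < 0) {
    		perror("open");
    		return 1;
    	}
    	if (write(fd, "f\n", 2) != 2)
    		perror("write");
    	close(fd);
    	return 0;
    }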
|