Home
last modified time | relevance | path

Searched full:affinity (Results 1 – 25 of 432) sorted by relevance

12345678910>>...18

/Linux-v5.10/kernel/irq/
Dcpuhotplug.c19 /* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
28 * which do not implement effective affinity, but the architecture has in irq_needs_fixup()
29 * enabled the config switch. Use the general affinity mask instead. in irq_needs_fixup()
45 pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n", in irq_needs_fixup()
58 const struct cpumask *affinity; in migrate_one_irq() local
64 * still in the radix tree. Also if the chip has no affinity setter, in migrate_one_irq()
76 * - Affinity mask does not include this CPU. in migrate_one_irq()
100 * mask, so the last change of the affinity does not get lost. If in migrate_one_irq()
102 * any online CPU, use the current affinity mask. in migrate_one_irq()
105 affinity = irq_desc_get_pending_mask(desc); in migrate_one_irq()
[all …]
Dirqdesc.c57 if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity, in alloc_masks()
64 free_cpumask_var(desc->irq_common_data.affinity); in alloc_masks()
74 free_cpumask_var(desc->irq_common_data.affinity); in alloc_masks()
82 const struct cpumask *affinity) in desc_smp_init() argument
84 if (!affinity) in desc_smp_init()
85 affinity = irq_default_affinity; in desc_smp_init()
86 cpumask_copy(desc->irq_common_data.affinity, affinity); in desc_smp_init()
100 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { } in desc_smp_init() argument
104 const struct cpumask *affinity, struct module *owner) in desc_set_defaults() argument
127 desc_smp_init(desc, node, affinity); in desc_set_defaults()
[all …]
Dmsi.c24 * @affinity: Optional pointer to an affinity mask array size of @nvec
26 * If @affinity is not NULL then an affinity array[@nvec] is allocated
27 * and the affinity masks and flags from @affinity are copied.
30 const struct irq_affinity_desc *affinity) in alloc_msi_entry() argument
41 if (affinity) { in alloc_msi_entry()
42 desc->affinity = kmemdup(affinity, in alloc_msi_entry()
43 nvec * sizeof(*desc->affinity), GFP_KERNEL); in alloc_msi_entry()
44 if (!desc->affinity) { in alloc_msi_entry()
55 kfree(entry->affinity); in free_msi_entry()
93 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
[all …]
Daffinity.c88 /* Calculate the number of nodes in the supplied affinity mask */ in get_nodes_in_cpumask()
334 * build affinity in two stages:
409 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
411 * @affd: Description of the affinity requirements
453 /* Fill out vectors at the beginning that don't need affinity */ in irq_create_affinity_masks()
459 * have multiple sets, build each set's affinity mask separately. in irq_create_affinity_masks()
475 /* Fill out vectors at the end that don't need affinity */ in irq_create_affinity_masks()
494 * @affd: Description of the affinity requirements
/Linux-v5.10/lib/
Dcpu_rmap.c3 * cpu_rmap.c: CPU affinity reverse-map support
14 * CPU affinity. However, we do not assume that the object affinities
21 * alloc_cpu_rmap - allocate CPU affinity reverse-map
150 * cpu_rmap_update - update CPU rmap following a change of object affinity
152 * @index: Index of object whose affinity changed
153 * @affinity: New CPU affinity of object
156 const struct cpumask *affinity) in cpu_rmap_update() argument
176 /* Set distance to 0 for all CPUs in the new affinity mask. in cpu_rmap_update()
179 for_each_cpu(cpu, affinity) { in cpu_rmap_update()
211 /* Glue between IRQ affinity notifiers and CPU rmaps */
[all …]
/Linux-v5.10/tools/virtio/ringtest/
Drun-on-all.sh19 echo "GUEST AFFINITY $cpu"
20 "$@" --host-affinity $HOST_AFFINITY --guest-affinity $cpu
23 echo "NO GUEST AFFINITY"
24 "$@" --host-affinity $HOST_AFFINITY
25 echo "NO AFFINITY"
/Linux-v5.10/tools/perf/util/
Daffinity.c2 /* Manage affinity to optimize IPIs inside the kernel perf API. */
10 #include "affinity.h"
24 int affinity__setup(struct affinity *a) in affinity__setup()
44 * It is more efficient to change perf's affinity to the target
48 void affinity__set(struct affinity *a, int cpu) in affinity__set()
57 * We ignore errors because affinity is just an optimization. in affinity__set()
65 void affinity__cleanup(struct affinity *a) in affinity__cleanup()
Daffinity.h7 struct affinity { struct
13 void affinity__cleanup(struct affinity *a); argument
14 void affinity__set(struct affinity *a, int cpu);
15 int affinity__setup(struct affinity *a);
Dmmap.c97 static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity) in perf_mmap__aio_bind() argument
105 if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) { in perf_mmap__aio_bind()
141 int cpu __maybe_unused, int affinity __maybe_unused) in perf_mmap__aio_bind()
175 ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity); in perf_mmap__aio_mmap()
265 if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) in perf_mmap__setup_affinity_mask()
267 else if (mp->affinity == PERF_AFFINITY_CPU) in perf_mmap__setup_affinity_mask()
281 if (mp->affinity != PERF_AFFINITY_SYS && in mmap__mmap()
283 pr_debug2("failed to alloc mmap affinity mask, error %d\n", in mmap__mmap()
/Linux-v5.10/tools/testing/selftests/rseq/
Dbasic_test.c18 cpu_set_t affinity, test_affinity; in test_cpu_pointer() local
21 sched_getaffinity(0, sizeof(affinity), &affinity); in test_cpu_pointer()
24 if (CPU_ISSET(i, &affinity)) { in test_cpu_pointer()
35 sched_setaffinity(0, sizeof(affinity), &affinity); in test_cpu_pointer()
/Linux-v5.10/drivers/infiniband/hw/hfi1/
Daffinity.h78 /* Initialize driver affinity data */
81 * Set IRQ affinity to a CPU. The function will determine the
82 * CPU and set the affinity to it.
87 * Remove the IRQ's CPU affinity. This function also updates
93 * Determine a CPU affinity for a user process, if the process does not
94 * have an affinity set yet.
118 struct mutex lock; /* protects affinity nodes */
Daffinity.c54 #include "affinity.h"
187 * The real cpu mask is part of the affinity struct but it has to be in node_affinity_init()
428 hfi1_cdbg(AFFINITY, in _dev_comp_vect_mappings_destroy()
478 hfi1_cdbg(AFFINITY, in _dev_comp_vect_mappings_create()
554 … "Number of kernel receive queues is too large for completion vector affinity to be effective\n"); in _dev_comp_vect_cpu_mask_init()
583 hfi1_cdbg(AFFINITY, in _dev_comp_vect_cpu_mask_init()
584 "[%s] Completion vector affinity CPU set(s) %*pbl", in _dev_comp_vect_cpu_mask_init()
623 * Interrupt affinity.
659 * If this is the first time this NUMA node's affinity is used, in hfi1_dev_affinity_init()
660 * create an entry in the global affinity structure and initialize it. in hfi1_dev_affinity_init()
[all …]
/Linux-v5.10/tools/power/cpupower/bench/
Dsystem.c67 * sets cpu affinity for the process
69 * @param cpu cpu# to which the affinity should be set
72 * @retval -1 when setting the affinity failed
82 dprintf("set affinity to cpu #%u\n", cpu); in set_cpu_affinity()
86 fprintf(stderr, "warning: unable to set cpu affinity\n"); in set_cpu_affinity()
146 * sets up the cpu affinity and scheduler priority
155 printf("set cpu affinity to cpu #%u\n", config->cpu); in prepare_system()
/Linux-v5.10/include/linux/
Dcpu_rmap.h6 * cpu_rmap.c: CPU affinity reverse-map support
16 * struct cpu_rmap - CPU affinity reverse-map
22 * based on affinity masks
40 const struct cpumask *affinity);
53 * alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs
Dirq.h59 * it from affinity setting
66 * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
115 * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity
116 * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity
136 * @affinity: IRQ affinity on SMP. If this is an IPI
139 * @effective_affinity: The effective IRQ affinity on SMP as some irq
141 * A subset of @affinity.
143 * @ipi_offset: Offset of first IPI target cpu in @affinity. Optional.
152 cpumask_var_t affinity; member
192 * IRQD_SETAFFINITY_PENDING - Affinity setting is pending
[all …]
/Linux-v5.10/tools/testing/selftests/bpf/
Dbench.c19 .affinity = false,
125 { "affinity", 'a', NULL, 0, "Set consumer/producer thread affinity"},
126 { "prod-affinity", ARG_PROD_AFFINITY_SET, "CPUSET", 0,
127 "Set of CPUs for producer threads; implies --affinity"},
128 { "cons-affinity", ARG_CONS_AFFINITY_SET, "CPUSET", 0,
129 "Set of CPUs for consumer threads; implies --affinity"},
180 env.affinity = true; in parse_arg()
183 env.affinity = true; in parse_arg()
191 env.affinity = true; in parse_arg()
272 fprintf(stderr, "setting affinity to CPU #%d failed: %d\n", in set_thread_affinity()
[all …]
/Linux-v5.10/drivers/iommu/
Dhyperv-iommu.c46 /* Return error If new irq affinity is out of ioapic_max_cpumask. */ in hyperv_ir_set_affinity()
94 * affinity only needs to change IRTE of IOMMU. But Hyper-V doesn't in hyperv_irq_remapping_alloc()
95 * support interrupt remapping function, setting irq affinity of IO-APIC in hyperv_irq_remapping_alloc()
102 * affinity() set vector and dest_apicid directly into IO-APIC entry. in hyperv_irq_remapping_alloc()
107 * Hyper-V IO APIC irq affinity should be in the scope of in hyperv_irq_remapping_alloc()
111 cpumask_copy(desc->irq_common_data.affinity, &ioapic_max_cpumask); in hyperv_irq_remapping_alloc()
170 * max cpu affinity for IOAPIC irqs. Scan cpu 0-255 and set cpu in hyperv_prepare_irq_remapping()
/Linux-v5.10/arch/mips/kernel/
Dmips-mt-fpaff.c20 * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
30 * FPU affinity with the user's requested processor affinity.
63 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
155 * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
214 printk(KERN_DEBUG "FPU Affinity set after %ld emulations\n", in mt_fp_affinity_init()
/Linux-v5.10/arch/arm64/kernel/
Dsetup.c107 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
114 u32 i, affinity, fs[4], bits[4], ls; in smp_build_mpidr_hash() local
118 * not contribute to affinity levels, ie they never toggle. in smp_build_mpidr_hash()
124 * Find and stash the last and first bit set at all affinity levels to in smp_build_mpidr_hash()
128 affinity = MPIDR_AFFINITY_LEVEL(mask, i); in smp_build_mpidr_hash()
132 * to express the affinity level. in smp_build_mpidr_hash()
134 ls = fls(affinity); in smp_build_mpidr_hash()
135 fs[i] = affinity ? ffs(affinity) - 1 : 0; in smp_build_mpidr_hash()
140 * significant bits at each affinity level and by shifting in smp_build_mpidr_hash()
/Linux-v5.10/drivers/gpu/drm/amd/amdkfd/
Dkfd_crat.h85 * ComputeUnit Affinity structure and definitions
120 * HSA Memory Affinity structure and definitions
145 * HSA Cache Affinity structure and definitions
174 * HSA TLB Affinity structure and definitions
209 * HSA CCompute/APU Affinity structure and definitions
228 * HSA IO Link Affinity structure and definitions
/Linux-v5.10/drivers/perf/
Darm_pmu_platform.c62 return !!of_find_property(node, "interrupt-affinity", NULL); in pmu_has_irq_affinity()
71 * If we don't have an interrupt-affinity property, we guess irq in pmu_parse_irq_affinity()
72 * affinity matches our logical CPU order, as we used to assume. in pmu_parse_irq_affinity()
78 dn = of_parse_phandle(node, "interrupt-affinity", i); in pmu_parse_irq_affinity()
80 pr_warn("failed to parse interrupt-affinity[%d] for %pOFn\n", in pmu_parse_irq_affinity()
126 pr_warn("no interrupt-affinity property for %pOF, guessing.\n", in pmu_parse_irqs()
/Linux-v5.10/Documentation/core-api/irq/
Dirq-affinity.rst2 SMP IRQ affinity
14 IRQ affinity then the value will not change from the default of all cpus.
16 /proc/irq/default_smp_affinity specifies default affinity mask that applies
17 to all non-active IRQs. Once IRQ is allocated/activated its affinity bitmask
/Linux-v5.10/drivers/irqchip/
Dirq-bcm7038-l1.c51 u8 affinity[MAX_WORDS * IRQS_PER_WORD]; member
185 __bcm7038_l1_unmask(d, intc->affinity[d->hwirq]); in bcm7038_l1_unmask()
195 __bcm7038_l1_mask(d, intc->affinity[d->hwirq]); in bcm7038_l1_mask()
213 was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] & in bcm7038_l1_set_affinity()
215 __bcm7038_l1_mask(d, intc->affinity[hw]); in bcm7038_l1_set_affinity()
216 intc->affinity[hw] = first_cpu; in bcm7038_l1_set_affinity()
233 /* This CPU was not on the affinity mask */ in bcm7038_l1_cpu_offline()
239 * Multiple CPU affinity, remove this CPU from the affinity in bcm7038_l1_cpu_offline()
/Linux-v5.10/block/
Dblk-mq-rdma.c16 * interrupt vectors as @set has queues. It will then query its affinity mask
17 * and build a queue mapping that maps a queue to the CPUs that have irq affinity
21 * @set->nr_hw_queues, or @dev does not provide an affinity mask for a
/Linux-v5.10/Documentation/devicetree/bindings/interrupt-controller/
Darm,gic-v3.yaml43 If the system requires describing PPI affinity, then the value must
133 PPI affinity can be expressed as a single "ppi-partitions" node,
139 affinity:
146 - affinity
262 affinity = <&cpu0 &cpu2>;
266 affinity = <&cpu1 &cpu3>;

12345678910>>...18