Lines matching +full:out +full:- +full:masks (kernel/irq/affinity.c)

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2016-2017 Christoph Hellwig.
27 cpus_per_vec--; in irq_spread_init_one()
31 for (sibl = -1; cpus_per_vec > 0; ) { in irq_spread_init_one()
38 cpus_per_vec--; in irq_spread_init_one()
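
The matched lines above come from irq_spread_init_one(), which fills one vector's mask by claiming a CPU from the node mask and then preferring that CPU's SMT siblings until cpus_per_vec CPUs are taken. Below is a minimal userspace sketch of that "claim a CPU, then drain its siblings first" order, using plain bool arrays instead of struct cpumask; pick_cpus_for_vec(), sibling_of() and the 2-thread topology are made-up stand-ins, not the kernel API.

/*
 * Hedged sketch of the spreading order in irq_spread_init_one():
 * 'avail[]' plays the role of nmsk, 'vecmask[]' the role of irqmsk,
 * and sibling_of() stands in for topology_sibling_cpumask().
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* Assume CPUs n and n^1 are SMT siblings (hypothetical topology). */
static int sibling_of(int cpu) { return cpu ^ 1; }

static void pick_cpus_for_vec(bool avail[], bool vecmask[], int cpus_per_vec)
{
	for (int cpu = 0; cpu < NR_CPUS && cpus_per_vec > 0; cpu++) {
		if (!avail[cpu])
			continue;

		/* Claim the first available CPU for this vector. */
		avail[cpu] = false;
		vecmask[cpu] = true;
		cpus_per_vec--;

		/* If the CPU has an available sibling, use it next. */
		int sibl = sibling_of(cpu);
		if (cpus_per_vec > 0 && avail[sibl]) {
			avail[sibl] = false;
			vecmask[sibl] = true;
			cpus_per_vec--;
		}
	}
}

int main(void)
{
	bool avail[NR_CPUS] = { true, true, true, true, true, true, true, true };
	bool vec[NR_CPUS] = { false };

	pick_cpus_for_vec(avail, vec, 4);
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (vec[cpu])
			printf("cpu %d\n", cpu);	/* prints 0, 1, 2, 3 */
	return 0;
}
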
45 cpumask_var_t *masks; in alloc_node_to_cpumask() local
48 masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL); in alloc_node_to_cpumask()
49 if (!masks) in alloc_node_to_cpumask()
53 if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL)) in alloc_node_to_cpumask()
57 return masks; in alloc_node_to_cpumask()
60 while (--node >= 0) in alloc_node_to_cpumask()
61 free_cpumask_var(masks[node]); in alloc_node_to_cpumask()
62 kfree(masks); in alloc_node_to_cpumask()
66 static void free_node_to_cpumask(cpumask_var_t *masks) in free_node_to_cpumask() argument
71 free_cpumask_var(masks[node]); in free_node_to_cpumask()
72 kfree(masks); in free_node_to_cpumask()
75 static void build_node_to_cpumask(cpumask_var_t *masks) in build_node_to_cpumask() argument
80 cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]); in build_node_to_cpumask()
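
alloc_node_to_cpumask(), free_node_to_cpumask() and build_node_to_cpumask() above manage a per-NUMA-node table of CPU masks that the later spreading stages index by node id. A minimal userspace analogue, with a fixed node count and one bitmask per node instead of cpumask_var_t; NR_NODES, NR_CPUS and fake_cpu_to_node() are illustrative stand-ins for nr_node_ids, nr_cpu_ids and cpu_to_node().

/*
 * Hedged sketch of the node -> CPU-mask table built by
 * build_node_to_cpumask(): bit 'cpu' of node_to_cpumask[node] is set
 * if that CPU belongs to the node.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_NODES 2
#define NR_CPUS  8

static int fake_cpu_to_node(int cpu)
{
	return cpu < 4 ? 0 : 1;	/* CPUs 0-3 on node 0, CPUs 4-7 on node 1 */
}

int main(void)
{
	uint64_t node_to_cpumask[NR_NODES] = { 0 };

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		node_to_cpumask[fake_cpu_to_node(cpu)] |= 1ULL << cpu;

	for (int node = 0; node < NR_NODES; node++)
		printf("node %d: 0x%llx\n", node,
		       (unsigned long long)node_to_cpumask[node]);
	return 0;
}
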
112 return ln->ncpus - rn->ncpus; in ncpus_cmp_func()
161 * node's nr_cpus to remaining un-assigned ncpus. 'numvecs' is in alloc_nodes_vectors()
166 * least one vector, and the theory is simple: over-allocation in alloc_nodes_vectors()
184 * vecs(B) = V - vecs(A) in alloc_nodes_vectors()
187 * V = N - delta, and 0 <= delta <= N - 2 in alloc_nodes_vectors()
200 * over-allocated, so vecs(B) <= ncpu(B), in alloc_nodes_vectors()
206 * round_down((N - delta) * ncpu(A) / N) = in alloc_nodes_vectors()
207 * round_down((N * ncpu(A) - delta * ncpu(A)) / N) >= in alloc_nodes_vectors()
208 * round_down((N * ncpu(A) - delta * N) / N) = in alloc_nodes_vectors()
	209		 *	ncpu(A) - delta					 in alloc_nodes_vectors()
213 * vecs(A) - V >= ncpu(A) - delta - V in alloc_nodes_vectors()
215 * V - vecs(A) <= V + delta - ncpu(A) in alloc_nodes_vectors()
217 * vecs(B) <= N - ncpu(A) in alloc_nodes_vectors()
223 * and we always re-calculate 'remaining_ncpus' & 'numvecs', and in alloc_nodes_vectors()
242 remaining_ncpus -= ncpus; in alloc_nodes_vectors()
243 numvecs -= nvectors; in alloc_nodes_vectors()
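
The comment fragments above (source lines 161-217) sketch the argument that giving each node roughly numvecs * ncpus / remaining_ncpus vectors, visiting nodes in ascending CPU count and recomputing the remainders after every step, never exhausts the vector budget and never hands a node more vectors than it has CPUs. The sketch below re-implements just that arithmetic in userspace so one allocation round can be traced; the node data and names are assumptions, not the kernel's structures.

/*
 * Hedged sketch of the proportional loop in alloc_nodes_vectors():
 * nodes are sorted by active CPU count (ascending), each node gets
 *	nvectors = max(1, numvecs * ncpus / remaining_ncpus)
 * and then both remaining_ncpus and numvecs shrink, mirroring the
 * "remaining_ncpus -= ncpus; numvecs -= nvectors;" lines above.
 */
#include <stdio.h>
#include <stdlib.h>

struct node_vec { unsigned int id, ncpus, nvectors; };

static int ncpus_cmp(const void *l, const void *r)
{
	const struct node_vec *ln = l, *rn = r;

	return (int)ln->ncpus - (int)rn->ncpus;
}

int main(void)
{
	struct node_vec nodes[] = {
		{ .id = 0, .ncpus = 2 },
		{ .id = 1, .ncpus = 6 },
		{ .id = 2, .ncpus = 24 },
	};
	unsigned int numvecs = 8, remaining_ncpus = 2 + 6 + 24;

	qsort(nodes, 3, sizeof(nodes[0]), ncpus_cmp);

	for (int i = 0; i < 3; i++) {
		unsigned int ncpus = nodes[i].ncpus;
		unsigned int nvec = numvecs * ncpus / remaining_ncpus;

		if (!nvec)
			nvec = 1;	/* every node gets at least one vector */
		/*
		 * Per the proof fragments above, nvec <= ncpus holds here
		 * because numvecs never exceeds the remaining CPU count.
		 */
		nodes[i].nvectors = nvec;
		remaining_ncpus -= ncpus;
		numvecs -= nvec;
		printf("node %u: ncpus=%u -> nvectors=%u\n",
		       nodes[i].id, ncpus, nvec);	/* 1, 1, 6 for 8 vectors */
	}
	return 0;
}
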
253 struct irq_affinity_desc *masks) in __irq_build_affinity_masks() argument
272 /* Ensure that only CPUs which are in both masks are set */ in __irq_build_affinity_masks()
274 cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk); in __irq_build_affinity_masks()
285 return -ENOMEM; in __irq_build_affinity_masks()
295 if (nv->nvectors == UINT_MAX) in __irq_build_affinity_masks()
299 cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]); in __irq_build_affinity_masks()
304 WARN_ON_ONCE(nv->nvectors > ncpus); in __irq_build_affinity_masks()
307 extra_vecs = ncpus - nv->nvectors * (ncpus / nv->nvectors); in __irq_build_affinity_masks()
310 for (v = 0; v < nv->nvectors; v++, curvec++) { in __irq_build_affinity_masks()
311 cpus_per_vec = ncpus / nv->nvectors; in __irq_build_affinity_masks()
316 --extra_vecs; in __irq_build_affinity_masks()
325 irq_spread_init_one(&masks[curvec].mask, nmsk, in __irq_build_affinity_masks()
328 done += nv->nvectors; in __irq_build_affinity_masks()
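
Source lines 307-316 above split one node's ncpus active CPUs across its nv->nvectors vectors: every vector gets the integer quotient ncpus / nv->nvectors, and the first extra_vecs vectors take one extra CPU so the remainder is spread as well. A hedged arithmetic sketch of just that split (no cpumasks, illustrative numbers only):

/*
 * Hedged sketch of the per-node split in __irq_build_affinity_masks():
 * ncpus CPUs are divided over nvectors vectors, with
 * extra_vecs = ncpus % nvectors vectors receiving one extra CPU.
 * (The kernel writes the remainder as
 *  ncpus - nvectors * (ncpus / nvectors), which is the same value.)
 */
#include <stdio.h>

int main(void)
{
	unsigned int ncpus = 10, nvectors = 4;
	unsigned int extra_vecs = ncpus - nvectors * (ncpus / nvectors);

	for (unsigned int v = 0; v < nvectors; v++) {
		unsigned int cpus_per_vec = ncpus / nvectors;

		if (extra_vecs) {
			cpus_per_vec++;
			--extra_vecs;
		}
		printf("vector %u gets %u CPUs\n", v, cpus_per_vec);
	}
	/* Output: 3, 3, 2, 2 -- all ten CPUs placed, no vector left empty. */
	return 0;
}
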
341 struct irq_affinity_desc *masks) in irq_build_affinity_masks() argument
346 int ret = -ENOMEM; in irq_build_affinity_masks()
362 /* Spread on present CPUs starting from affd->pre_vectors */ in irq_build_affinity_masks()
365 nmsk, masks); in irq_build_affinity_masks()
374 * out vectors. in irq_build_affinity_masks()
383 masks); in irq_build_affinity_masks()
405 affd->nr_sets = 1; in default_calc_sets()
406 affd->set_size[0] = affvecs; in default_calc_sets()
410 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
420 struct irq_affinity_desc *masks = NULL; in irq_create_affinity_masks() local
428 if (nvecs > affd->pre_vectors + affd->post_vectors) in irq_create_affinity_masks()
429 affvecs = nvecs - affd->pre_vectors - affd->post_vectors; in irq_create_affinity_masks()
437 if (!affd->calc_sets) in irq_create_affinity_masks()
438 affd->calc_sets = default_calc_sets; in irq_create_affinity_masks()
441 affd->calc_sets(affd, affvecs); in irq_create_affinity_masks()
443 if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS)) in irq_create_affinity_masks()
450 masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL); in irq_create_affinity_masks()
451 if (!masks) in irq_create_affinity_masks()
454 /* Fill out vectors at the beginning that don't need affinity */ in irq_create_affinity_masks()
455 for (curvec = 0; curvec < affd->pre_vectors; curvec++) in irq_create_affinity_masks()
456 cpumask_copy(&masks[curvec].mask, irq_default_affinity); in irq_create_affinity_masks()
459 * Spread on present CPUs starting from affd->pre_vectors. If we in irq_create_affinity_masks()
462 for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) { in irq_create_affinity_masks()
463 unsigned int this_vecs = affd->set_size[i]; in irq_create_affinity_masks()
467 curvec, masks); in irq_create_affinity_masks()
469 kfree(masks); in irq_create_affinity_masks()
476 /* Fill out vectors at the end that don't need affinity */ in irq_create_affinity_masks()
478 curvec = affd->pre_vectors + affvecs; in irq_create_affinity_masks()
480 curvec = affd->pre_vectors + usedvecs; in irq_create_affinity_masks()
482 cpumask_copy(&masks[curvec].mask, irq_default_affinity); in irq_create_affinity_masks()
485 for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++) in irq_create_affinity_masks()
486 masks[i].is_managed = 1; in irq_create_affinity_masks()
488 return masks; in irq_create_affinity_masks()
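
Taken together, source lines 454-486 lay the masks array out in three regions: affd->pre_vectors entries copied from irq_default_affinity, then the affvecs spread entries, then affd->post_vectors entries on the default affinity again, with only the middle region marked is_managed. A small sketch of that index arithmetic; the vector counts below are made-up values to show the layout only.

/*
 * Hedged sketch of the vector layout produced by
 * irq_create_affinity_masks():
 *
 *   [0 .. pre_vectors)                      default affinity, not managed
 *   [pre_vectors .. pre_vectors + affvecs)  spread across CPUs, managed
 *   [pre_vectors + affvecs .. nvecs)        default affinity, not managed
 */
#include <stdio.h>

int main(void)
{
	unsigned int nvecs = 10, pre_vectors = 2, post_vectors = 1;
	unsigned int affvecs = 0;

	if (nvecs > pre_vectors + post_vectors)
		affvecs = nvecs - pre_vectors - post_vectors;

	for (unsigned int i = 0; i < nvecs; i++) {
		int managed = i >= pre_vectors && i < pre_vectors + affvecs;

		printf("vector %u: %s%s\n", i,
		       managed ? "spread" : "default affinity",
		       managed ? ", is_managed=1" : "");
	}
	return 0;
}
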
492 * irq_calc_affinity_vectors - Calculate the optimal number of vectors
500 unsigned int resv = affd->pre_vectors + affd->post_vectors; in irq_calc_affinity_vectors()
506 if (affd->calc_sets) { in irq_calc_affinity_vectors()
507 set_vecs = maxvec - resv; in irq_calc_affinity_vectors()
514 return resv + min(set_vecs, maxvec - resv); in irq_calc_affinity_vectors()
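
The final lines come from irq_calc_affinity_vectors(), which reserves pre_vectors + post_vectors, bounds the spreadable part either by the driver's calc_sets() result or by the number of possible CPUs, and returns resv + min(set_vecs, maxvec - resv). A worked example with assumed numbers:

/*
 * Hedged worked example of the clamp in irq_calc_affinity_vectors():
 * the reserved (pre + post) vectors always fit, and the spreadable part
 * is limited both by what the device offers (maxvec - resv) and by the
 * CPUs available to spread on (set_vecs).  All numbers are assumptions.
 */
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int maxvec = 32, pre_vectors = 1, post_vectors = 1;
	unsigned int num_possible_cpus = 8;

	unsigned int resv = pre_vectors + post_vectors;		/* 2 */
	unsigned int set_vecs = num_possible_cpus;	/* no calc_sets(): use CPU count */

	/* 2 + min(8, 30) = 10 vectors are worth allocating */
	printf("%u\n", resv + min_u(set_vecs, maxvec - resv));
	return 0;
}
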