/Linux-v4.19/kernel/cgroup/ |
D | cpuset.c |
     105  cpumask_var_t cpus_allowed;  member
     415  return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&  in is_cpuset_subset()
     433  if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))  in alloc_trial_cpuset()
     438  cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);  in alloc_trial_cpuset()
     443  free_cpumask_var(trial->cpus_allowed);  in alloc_trial_cpuset()
     456  free_cpumask_var(trial->cpus_allowed);  in free_trial_cpuset()
     514  cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))  in validate_change()
     528  if (!cpumask_empty(cur->cpus_allowed) &&  in validate_change()
     529  cpumask_empty(trial->cpus_allowed))  in validate_change()
     542  !cpuset_cpumask_can_shrink(cur->cpus_allowed,  in validate_change()
     [all …]
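The is_cpuset_subset() and validate_change() hits above reduce to two mask relations: a cpuset's cpus_allowed must stay a subset of its parent's, and exclusive siblings must not intersect. A minimal userspace sketch of those two tests, using the glibc cpu_set_t macros rather than the kernel cpumask API (the helper names here are illustrative, not from cpuset.c):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* child is a subset of parent iff (child & parent) == child */
    static bool mask_subset(const cpu_set_t *child, const cpu_set_t *parent)
    {
            cpu_set_t tmp;
            CPU_AND(&tmp, child, parent);
            return CPU_EQUAL(&tmp, child);
    }

    /* exclusive siblings must share no CPU */
    static bool mask_intersects(const cpu_set_t *a, const cpu_set_t *b)
    {
            cpu_set_t tmp;
            CPU_AND(&tmp, a, b);
            return CPU_COUNT(&tmp) != 0;
    }

    int main(void)
    {
            cpu_set_t parent, child;
            CPU_ZERO(&parent);
            CPU_ZERO(&child);
            CPU_SET(0, &parent);
            CPU_SET(1, &parent);
            CPU_SET(1, &child);
            printf("subset=%d intersects=%d\n",
                   mask_subset(&child, &parent),
                   mask_intersects(&child, &parent));  /* subset=1 intersects=1 */
            return 0;
    }

In the kernel the same checks run as cpumask_subset() and cpumask_intersects() against a heap-allocated trial copy of the cpuset, as the alloc_trial_cpuset()/free_trial_cpuset() hits show.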
|
/Linux-v4.19/arch/mips/kernel/ |
D | mips-mt-fpaff.c |
      68  cpumask_var_t cpus_allowed, new_mask, effective_mask;  in mipsmt_sys_sched_setaffinity() local
      93  if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {  in mipsmt_sys_sched_setaffinity()
     131  cpuset_cpus_allowed(p, cpus_allowed);  in mipsmt_sys_sched_setaffinity()
     132  if (!cpumask_subset(effective_mask, cpus_allowed)) {  in mipsmt_sys_sched_setaffinity()
     138  cpumask_copy(new_mask, cpus_allowed);  in mipsmt_sys_sched_setaffinity()
     147  free_cpumask_var(cpus_allowed);  in mipsmt_sys_sched_setaffinity()
     180  cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);  in mipsmt_sys_sched_getaffinity()
|
D | traps.c |
    1177  if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {  in mt_ase_fp_affinity()
    1181  = current->cpus_allowed;  in mt_ase_fp_affinity()
    1182  cpumask_and(&tmask, &current->cpus_allowed,  in mt_ase_fp_affinity()
|
/Linux-v4.19/kernel/sched/ |
D | cpupri.c |
     101  if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)  in cpupri_find()
     105  cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);  in cpupri_find()
|
D | cpudeadline.c |
     127  cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {  in cpudl_find()
     134  if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) &&  in cpudl_find()
|
D | core.c |
     880  if (!cpumask_test_cpu(cpu, &p->cpus_allowed))  in is_cpu_allowed()
    1006  cpumask_copy(&p->cpus_allowed, new_mask);  in set_cpus_allowed_common()
    1076  if (cpumask_equal(&p->cpus_allowed, new_mask))  in __set_cpus_allowed_ptr()
    1239  if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed))  in migrate_swap_stop()
    1242  if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed))  in migrate_swap_stop()
    1284  if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed))  in migrate_swap()
    1287  if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed))  in migrate_swap()
    1472  if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))  in select_fallback_rq()
    1479  for_each_cpu(dest_cpu, &p->cpus_allowed) {  in select_fallback_rq()
    1533  cpu = cpumask_any(&p->cpus_allowed);  in select_task_rq()
    [all …]
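Every hit in core.c is one of two operations on the same field: a guard (cpumask_test_cpu() in is_cpu_allowed(), migrate_swap(), select_fallback_rq()) or an update (cpumask_copy() in set_cpus_allowed_common()). From userspace the update path is driven through sched_setaffinity(); a small sketch that pins the calling task and confirms the guard now holds (standard glibc calls only, nothing kernel-internal):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            cpu_set_t mask;

            /* kernel side: __set_cpus_allowed_ptr() ends in
             * cpumask_copy(&p->cpus_allowed, new_mask) */
            CPU_ZERO(&mask);
            CPU_SET(0, &mask);
            if (sched_setaffinity(0, sizeof(mask), &mask)) {
                    perror("sched_setaffinity");
                    return 1;
            }

            /* every later placement must pass the is_cpu_allowed()
             * cpumask_test_cpu() check, so this prints 0 */
            printf("now on CPU %d\n", sched_getcpu());
            return 0;
    }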
|
D | fair.c |
    1634  if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))  in task_numa_compare()
    1731  if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))  in task_numa_find_cpu()
    5714  &p->cpus_allowed))  in find_idlest_group()
    5846  for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) {  in find_idlest_group_cpu()
    5886  if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))  in find_idlest_cpu()
    6002  cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);  in select_idle_core()
    6036  if (!cpumask_test_cpu(cpu, &p->cpus_allowed))  in select_idle_smt()
    6099  if (!cpumask_test_cpu(cpu, &p->cpus_allowed))  in select_idle_cpu()
    6136  cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) {  in select_idle_sibling()
    6320  && cpumask_test_cpu(cpu, &p->cpus_allowed);  in select_task_rq_fair()
    [all …]
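The fair.c hits are all the same filter: a candidate CPU found by the idle search is discarded unless cpumask_test_cpu(cpu, &p->cpus_allowed) passes, and the search loops themselves are for_each_cpu()/for_each_cpu_and() walks over that mask. A userspace counterpart of such a walk, illustrative only; sched_getaffinity() returns the same mask fair.c consults:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            cpu_set_t allowed;
            int ncpus = (int)sysconf(_SC_NPROCESSORS_CONF);

            if (sched_getaffinity(0, sizeof(allowed), &allowed)) {
                    perror("sched_getaffinity");
                    return 1;
            }

            /* analogue of for_each_cpu(cpu, &p->cpus_allowed) */
            for (int cpu = 0; cpu < ncpus; cpu++)
                    if (CPU_ISSET(cpu, &allowed))
                            printf("cpu%d allowed\n", cpu);
            return 0;
    }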
|
D | deadline.c |
     542  cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);  in dl_task_offline_migration()
    1827  cpumask_test_cpu(cpu, &p->cpus_allowed))  in pick_dl_task()
    1977  !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||  in find_lock_later_rq()
|
D | rt.c |
    1614  cpumask_test_cpu(cpu, &p->cpus_allowed))  in pick_rt_task()
    1751  !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) ||  in find_lock_lowest_rq()
|
/Linux-v4.19/lib/ |
D | smp_processor_id.c | 25 if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu))) in check_preemption_disabled()
|
/Linux-v4.19/samples/trace_events/ |
D | trace-events-sample.c | 36 &current->cpus_allowed); in simple_thread_func()
|
/Linux-v4.19/arch/mips/include/asm/ |
D | switch_to.h | 60 prev->cpus_allowed = prev->thread.user_cpus_allowed; \
|
/Linux-v4.19/init/ |
D | init_task.c | 74 .cpus_allowed = CPU_MASK_ALL,
|
/Linux-v4.19/arch/powerpc/platforms/cell/spufs/ |
D | sched.c |
     144  cpumask_copy(&ctx->cpus_allowed, &current->cpus_allowed);  in __spu_update_sched_info()
     173  if (cpumask_intersects(mask, &ctx->cpus_allowed))  in __node_allowed()
|
D | spufs.h | 124 cpumask_t cpus_allowed; member
|
/Linux-v4.19/drivers/crypto/caam/ |
D | qi.c |
     500  struct cpumask old_cpumask = current->cpus_allowed;  in caam_qi_shutdown()
     721  struct cpumask old_cpumask = current->cpus_allowed;  in caam_qi_init()
|
/Linux-v4.19/fs/proc/ |
D | array.c |
     384  cpumask_pr_args(&task->cpus_allowed));  in task_cpus_allowed()
     386  cpumask_pr_args(&task->cpus_allowed));  in task_cpus_allowed()
|
/Linux-v4.19/kernel/trace/ |
D | trace_hwlat.c | 280 if (!cpumask_equal(current_mask, &current->cpus_allowed)) in move_to_next_cpu()
|
/Linux-v4.19/drivers/infiniband/hw/qib/ |
D | qib_file_ops.c |
    1145  const unsigned int weight = cpumask_weight(&current->cpus_allowed);  in assign_ctxt_affinity()
    1626  const unsigned int cpu = cpumask_first(&current->cpus_allowed);  in qib_assign_ctxt()
    1628  cpumask_weight(&current->cpus_allowed);  in qib_assign_ctxt()
|
/Linux-v4.19/Documentation/cgroup-v1/ |
D | cpusets.txt |
      58  schedule a task on a CPU that is not allowed in its cpus_allowed
     158  displaying the task's cpus_allowed (on which CPUs it may be scheduled)
     480  (makes sure that all the CPUs in the cpus_allowed of that cpuset are
     654  their cpus_allowed to allow all online CPUs. When memory hotplug
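Line 158 of cpusets.txt refers to the /proc/<pid>/status display of the mask, which is what the two fs/proc/array.c hits above emit (the Cpus_allowed and Cpus_allowed_list fields). A minimal reader for the current task:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/proc/self/status", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* task_cpus_allowed() in fs/proc/array.c prints both fields */
            while (fgets(line, sizeof(line), f))
                    if (!strncmp(line, "Cpus_allowed", 12))
                            fputs(line, stdout);
            fclose(f);
            return 0;
    }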
|
/Linux-v4.19/arch/x86/kernel/cpu/ |
D | intel_rdt_pseudo_lock.c | 1438 if (!cpumask_subset(&current->cpus_allowed, &plr->d->cpu_mask)) { in pseudo_lock_dev_mmap()
|
/Linux-v4.19/drivers/infiniband/hw/hfi1/ |
D | affinity.c | 1040 *proc_mask = &current->cpus_allowed; in hfi1_get_proc_affinity()
|
D | sdma.c | 858 const struct cpumask *current_mask = &current->cpus_allowed; in sdma_select_user_engine()
|
/Linux-v4.19/include/linux/ |
D | sched.h | 663 cpumask_t cpus_allowed; member
|
/Linux-v4.19/arch/ia64/kernel/ |
D | mca.c | 1827 cpumask_set_cpu(cpu, &p->cpus_allowed); in format_mca_init_stack()
|