Lines Matching refs:cpu — identifier cross-reference hits, apparently from arch/powerpc/kernel/smp.c. Each entry gives the source line number, the matching line, its containing function, and whether the hit is an argument or a local.

258 void smp_muxed_ipi_set_message(int cpu, int msg)  in smp_muxed_ipi_set_message()  argument
260 struct cpu_messages *info = &per_cpu(ipi_message, cpu); in smp_muxed_ipi_set_message()
270 void smp_muxed_ipi_message_pass(int cpu, int msg) in smp_muxed_ipi_message_pass() argument
272 smp_muxed_ipi_set_message(cpu, msg); in smp_muxed_ipi_message_pass()
278 smp_ops->cause_ipi(cpu); in smp_muxed_ipi_message_pass()
332 static inline void do_message_pass(int cpu, int msg) in do_message_pass() argument
335 smp_ops->message_pass(cpu, msg); in do_message_pass()
338 smp_muxed_ipi_message_pass(cpu, msg); in do_message_pass()
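
The three functions above show the muxed-IPI pattern: smp_muxed_ipi_set_message() sets a per-CPU message bit, smp_ops->cause_ipi() rings a single doorbell, and do_message_pass() falls back to the muxed path whenever the platform has no direct message_pass hook. Several logical IPIs thus share one hardware interrupt, and the receiver drains the word atomically, handling every bit that is set. A minimal userspace sketch of that pattern with C11 atomics (the array size, the receive function, and main() are illustrative, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

#define PPC_MSG_CALL_FUNCTION   0
#define PPC_MSG_RESCHEDULE      1
#define PPC_MSG_TICK_BROADCAST  2
#define PPC_MSG_NMI_IPI         3

/* Stand-in for struct cpu_messages: one atomic word of pending bits. */
static _Atomic unsigned long ipi_message[4];    /* one slot per "CPU" */

/* Sender side: set the message bit, then ring one doorbell. */
static void muxed_ipi_set_message(int cpu, int msg)
{
        atomic_fetch_or(&ipi_message[cpu], 1UL << msg);
        /* smp_ops->cause_ipi(cpu) would raise the hardware IPI here. */
}

/* Receiver side: drain all pending bits with one atomic exchange. */
static void muxed_ipi_receive(int cpu)
{
        unsigned long all = atomic_exchange(&ipi_message[cpu], 0);

        while (all) {
                int msg = __builtin_ctzl(all);
                printf("cpu %d: handling message %d\n", cpu, msg);
                all &= all - 1;         /* clear lowest set bit */
        }
}

int main(void)
{
        muxed_ipi_set_message(1, PPC_MSG_RESCHEDULE);
        muxed_ipi_set_message(1, PPC_MSG_CALL_FUNCTION);
        muxed_ipi_receive(1);           /* both handled by one "interrupt" */
        return 0;
}
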
342 void smp_send_reschedule(int cpu) in smp_send_reschedule() argument
345 do_message_pass(cpu, PPC_MSG_RESCHEDULE); in smp_send_reschedule()
349 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
351 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION); in arch_send_call_function_single_ipi()
356 unsigned int cpu; in arch_send_call_function_ipi_mask() local
358 for_each_cpu(cpu, mask) in arch_send_call_function_ipi_mask()
359 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION); in arch_send_call_function_ipi_mask()
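
arch_send_call_function_ipi_mask() simply walks the request mask with for_each_cpu() and posts one PPC_MSG_CALL_FUNCTION per set bit, exactly as tick_broadcast() does with PPC_MSG_TICK_BROADCAST at lines 553-556. A toy single-word version of that iterate-a-cpumask pattern (the macro and do_message_pass() stub are illustrative):

#include <stdio.h>

/* Toy cpumask: one word is enough for up to 64 CPUs in this sketch. */
typedef unsigned long cpumask_t;

#define for_each_cpu(cpu, mask)                         \
        for ((cpu) = 0; (cpu) < 64; (cpu)++)            \
                if ((mask) & (1UL << (cpu)))

static void do_message_pass(int cpu, int msg)
{
        printf("IPI msg %d -> cpu %d\n", msg, cpu);
}

int main(void)
{
        cpumask_t mask = (1UL << 2) | (1UL << 5) | (1UL << 7);
        int cpu;

        for_each_cpu(cpu, mask)
                do_message_pass(cpu, /* PPC_MSG_CALL_FUNCTION */ 0);
        return 0;
}
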
450 static void do_smp_send_nmi_ipi(int cpu, bool safe) in do_smp_send_nmi_ipi() argument
452 if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu)) in do_smp_send_nmi_ipi()
455 if (cpu >= 0) { in do_smp_send_nmi_ipi()
456 do_message_pass(cpu, PPC_MSG_NMI_IPI); in do_smp_send_nmi_ipi()
474 static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), in __smp_send_nmi_ipi() argument
481 BUG_ON(cpu == me); in __smp_send_nmi_ipi()
482 BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS); in __smp_send_nmi_ipi()
498 if (cpu < 0) { in __smp_send_nmi_ipi()
503 cpumask_set_cpu(cpu, &nmi_ipi_pending_mask); in __smp_send_nmi_ipi()
510 do_smp_send_nmi_ipi(cpu, safe); in __smp_send_nmi_ipi()
539 int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) in smp_send_nmi_ipi() argument
541 return __smp_send_nmi_ipi(cpu, fn, delay_us, false); in smp_send_nmi_ipi()
544 int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) in smp_send_safe_nmi_ipi() argument
546 return __smp_send_nmi_ipi(cpu, fn, delay_us, true); in smp_send_safe_nmi_ipi()
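
Lines 450-546 are the NMI IPI machinery: do_smp_send_nmi_ipi() tries the optional smp_ops->cause_nmi_ipi() fast path unless the caller asked for the safe variant, while __smp_send_nmi_ipi() records the target in nmi_ipi_pending_mask, falls back to an ordinary PPC_MSG_NMI_IPI message, and gives targets delay_us microseconds to respond. A userspace sketch that merges the two into one try-fast-path-else-fallback routine with a bounded wait (all names and the always-failing fast path are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static _Atomic unsigned long nmi_ipi_pending_mask;

/* Hypothetical stand-in for smp_ops->cause_nmi_ipi(); returning false
 * means "not supported on this platform, fall back". */
static bool cause_nmi_ipi(int cpu)
{
        (void)cpu;
        return false;
}

static void send_nmi_message(int cpu)
{
        printf("fallback: PPC_MSG_NMI_IPI -> cpu %d\n", cpu);
}

static int send_nmi_ipi(int cpu, unsigned long long delay_us, bool safe)
{
        /* Fast path first, unless the caller wants the safe variant. */
        if (!safe && cause_nmi_ipi(cpu))
                return 1;

        atomic_fetch_or(&nmi_ipi_pending_mask, 1UL << cpu);
        send_nmi_message(cpu);

        /* Bounded wait for the target to clear its pending bit. */
        while (atomic_load(&nmi_ipi_pending_mask) & (1UL << cpu)) {
                if (delay_us-- == 0)
                        return 0;       /* timed out */
                usleep(1);
        }
        return 1;
}

int main(void)
{
        /* Nobody acks in this single-threaded sketch: expect a timeout. */
        printf("rc=%d\n", send_nmi_ipi(3, 100, false));
        return 0;
}
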
553 unsigned int cpu; in tick_broadcast() local
555 for_each_cpu(cpu, mask) in tick_broadcast()
556 do_message_pass(cpu, PPC_MSG_TICK_BROADCAST); in tick_broadcast()
575 int cpu; in crash_send_ipi() local
579 for_each_present_cpu(cpu) { in crash_send_ipi()
580 if (cpu_online(cpu)) in crash_send_ipi()
591 do_smp_send_nmi_ipi(cpu, false); in crash_send_ipi()
789 static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg) in get_cpu_thread_group_start() argument
791 int hw_cpu_id = get_hard_smp_processor_id(cpu); in get_cpu_thread_group_start()
808 static int init_cpu_l1_cache_map(int cpu) in init_cpu_l1_cache_map() argument
811 struct device_node *dn = of_get_cpu_node(cpu, NULL); in init_cpu_l1_cache_map()
815 int first_thread = cpu_first_thread_sibling(cpu); in init_cpu_l1_cache_map()
825 cpu_group_start = get_cpu_thread_group_start(cpu, &tg); in init_cpu_l1_cache_map()
833 zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu), in init_cpu_l1_cache_map()
834 GFP_KERNEL, cpu_to_node(cpu)); in init_cpu_l1_cache_map()
846 cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu)); in init_cpu_l1_cache_map()
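
init_cpu_l1_cache_map() asks the device tree which thread group the CPU belongs to (get_cpu_thread_group_start()) and then sets a bit in cpu_l1_cache_map for every sibling thread whose group starts at the same place; these become the small-core masks used further down. A sketch of that grouping with a hypothetical thread_group_start[] table, assuming SMT8 cores that split into two SMT4 groups per L1:

#include <stdio.h>

#define THREADS_PER_CORE 8

/* Hypothetical thread-group-start table: each core's eight threads
 * split into two SMT4 groups, each group sharing one L1. */
static int thread_group_start[THREADS_PER_CORE * 2] = {
        0, 0, 0, 0, 4, 4, 4, 4,         /* core 0: groups at 0 and 4   */
        8, 8, 8, 8, 12, 12, 12, 12,     /* core 1: groups at 8 and 12  */
};

static unsigned long cpu_l1_cache_map[THREADS_PER_CORE * 2];

static void init_l1_map(int cpu)
{
        int first = (cpu / THREADS_PER_CORE) * THREADS_PER_CORE;
        int start = thread_group_start[cpu];

        /* Every sibling whose group starts where ours does shares L1. */
        for (int i = first; i < first + THREADS_PER_CORE; i++)
                if (thread_group_start[i] == start)
                        cpu_l1_cache_map[cpu] |= 1UL << i;
}

int main(void)
{
        init_l1_map(5);
        printf("cpu 5 shares an L1 with mask %#lx\n", cpu_l1_cache_map[5]);
        return 0;                       /* prints 0xf0: cpus 4-7 */
}
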
885 static const struct cpumask *shared_cache_mask(int cpu) in shared_cache_mask() argument
887 return per_cpu(cpu_l2_cache_map, cpu); in shared_cache_mask()
891 static const struct cpumask *smallcore_smt_mask(int cpu) in smallcore_smt_mask() argument
893 return cpu_smallcore_mask(cpu); in smallcore_smt_mask()
897 static struct cpumask *cpu_coregroup_mask(int cpu) in cpu_coregroup_mask() argument
899 return per_cpu(cpu_coregroup_map, cpu); in cpu_coregroup_mask()
907 static const struct cpumask *cpu_mc_mask(int cpu) in cpu_mc_mask() argument
909 return cpu_coregroup_mask(cpu); in cpu_mc_mask()
924 int cpu; in init_big_cores() local
926 for_each_possible_cpu(cpu) { in init_big_cores()
927 int err = init_cpu_l1_cache_map(cpu); in init_big_cores()
932 zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu), in init_big_cores()
934 cpu_to_node(cpu)); in init_big_cores()
943 unsigned int cpu; in smp_prepare_cpus() local
957 for_each_possible_cpu(cpu) { in smp_prepare_cpus()
958 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), in smp_prepare_cpus()
959 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
960 zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu), in smp_prepare_cpus()
961 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
962 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), in smp_prepare_cpus()
963 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
965 zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu), in smp_prepare_cpus()
966 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
972 if (cpu_present(cpu)) { in smp_prepare_cpus()
973 set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]); in smp_prepare_cpus()
974 set_cpu_numa_mem(cpu, in smp_prepare_cpus()
975 local_memory_node(numa_cpu_lookup_table[cpu])); in smp_prepare_cpus()
983 cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu)); in smp_prepare_cpus()
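
smp_prepare_cpus() walks every possible CPU, allocating zeroed node-local cpumasks for the sibling, L2, core, and coregroup levels, wiring present CPUs to their NUMA node, and seeding the boot CPU's core map from cpu_cpu_mask(). A userspace model of the allocate-zeroed-per-CPU step, with calloc() standing in for zalloc_cpumask_var_node() (sizes and names are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8
#define MASK_LONGS 1    /* one word covers NR_CPUS in this sketch */

/* Per-CPU topology masks, modelled as heap-allocated zeroed bitmaps
 * the way zalloc_cpumask_var_node() allocates them node-locally. */
static unsigned long *cpu_sibling_map[NR_CPUS];
static unsigned long *cpu_l2_cache_map[NR_CPUS];
static unsigned long *cpu_core_map[NR_CPUS];

int main(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                /* calloc stands in for a GFP_KERNEL zeroed allocation */
                cpu_sibling_map[cpu]  = calloc(MASK_LONGS, sizeof(long));
                cpu_l2_cache_map[cpu] = calloc(MASK_LONGS, sizeof(long));
                cpu_core_map[cpu]     = calloc(MASK_LONGS, sizeof(long));
                if (!cpu_sibling_map[cpu] || !cpu_l2_cache_map[cpu] ||
                    !cpu_core_map[cpu])
                        return 1;
        }

        /* The boot CPU starts out related only to itself; secondaries
         * fill in their own masks later, from start_secondary(). */
        cpu_sibling_map[0][0] |= 1UL;
        printf("boot cpu sibling mask: %#lx\n", cpu_sibling_map[0][0]);
        return 0;
}
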
1017 unsigned int cpu = smp_processor_id(); in generic_cpu_disable() local
1019 if (cpu == boot_cpuid) in generic_cpu_disable()
1022 set_cpu_online(cpu, false); in generic_cpu_disable()
1044 void generic_cpu_die(unsigned int cpu) in generic_cpu_die() argument
1050 if (is_cpu_dead(cpu)) in generic_cpu_die()
1054 printk(KERN_ERR "CPU%d didn't die...\n", cpu); in generic_cpu_die()
1057 void generic_set_cpu_dead(unsigned int cpu) in generic_set_cpu_dead() argument
1059 per_cpu(cpu_state, cpu) = CPU_DEAD; in generic_set_cpu_dead()
1067 void generic_set_cpu_up(unsigned int cpu) in generic_set_cpu_up() argument
1069 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; in generic_set_cpu_up()
1072 int generic_check_cpu_restart(unsigned int cpu) in generic_check_cpu_restart() argument
1074 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; in generic_check_cpu_restart()
1077 int is_cpu_dead(unsigned int cpu) in is_cpu_dead() argument
1079 return per_cpu(cpu_state, cpu) == CPU_DEAD; in is_cpu_dead()
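
Lines 1017-1079 are the generic hotplug state machine: per-CPU cpu_state moves to CPU_UP_PREPARE before a kick and to CPU_DEAD as the dying CPU's last report, and generic_cpu_die() polls is_cpu_dead() for a bounded time before giving up with "CPU%d didn't die...". A two-thread model of that handshake (build with -pthread; the states, delays, and iteration counts here are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

enum { CPU_UP_PREPARE, CPU_ONLINE, CPU_DEAD };

static _Atomic int cpu_state[4];

static void *dying_cpu(void *arg)
{
        int cpu = *(int *)arg;

        usleep(2000);                             /* pretend to tear down */
        atomic_store(&cpu_state[cpu], CPU_DEAD);  /* generic_set_cpu_dead() */
        return NULL;
}

/* Model of generic_cpu_die(): bounded poll for CPU_DEAD. */
static void cpu_die_wait(int cpu)
{
        for (int i = 0; i < 100; i++) {
                if (atomic_load(&cpu_state[cpu]) == CPU_DEAD) {
                        printf("CPU%d is dead\n", cpu);
                        return;
                }
                usleep(100);
        }
        fprintf(stderr, "CPU%d didn't die...\n", cpu);
}

int main(void)
{
        pthread_t t;
        int cpu = 2;

        atomic_store(&cpu_state[cpu], CPU_ONLINE);
        pthread_create(&t, NULL, dying_cpu, &cpu);
        cpu_die_wait(cpu);
        pthread_join(t, NULL);
        return 0;
}
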
1093 static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle) in cpu_idle_thread_init() argument
1096 paca_ptrs[cpu]->__current = idle; in cpu_idle_thread_init()
1097 paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) + in cpu_idle_thread_init()
1100 idle->cpu = cpu; in cpu_idle_thread_init()
1101 secondary_current = current_set[cpu] = idle; in cpu_idle_thread_init()
1104 int __cpu_up(unsigned int cpu, struct task_struct *tidle) in __cpu_up() argument
1112 cpu_thread_in_subcore(cpu)) in __cpu_up()
1116 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) in __cpu_up()
1119 cpu_idle_thread_init(cpu, tidle); in __cpu_up()
1126 rc = smp_ops->prepare_cpu(cpu); in __cpu_up()
1134 cpu_callin_map[cpu] = 0; in __cpu_up()
1143 DBG("smp: kicking cpu %d\n", cpu); in __cpu_up()
1144 rc = smp_ops->kick_cpu(cpu); in __cpu_up()
1146 pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc); in __cpu_up()
1156 for (c = 50000; c && !cpu_callin_map[cpu]; c--) in __cpu_up()
1164 for (c = 5000; c && !cpu_callin_map[cpu]; c--) in __cpu_up()
1168 if (!cpu_callin_map[cpu]) { in __cpu_up()
1169 printk(KERN_ERR "Processor %u is stuck.\n", cpu); in __cpu_up()
1173 DBG("Processor %u found.\n", cpu); in __cpu_up()
1179 spin_until_cond(cpu_online(cpu)); in __cpu_up()
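
__cpu_up() shows the primary's half of bring-up: kick the CPU, busy-wait on cpu_callin_map[cpu] (a tight udelay loop early in boot, an msleep loop once the scheduler is running), report "Processor %u is stuck." on timeout, then spin until the secondary marks itself online. start_secondary(), listed from line 1396 onward, performs the matching writes: cpu_callin_map is set early and set_cpu_online() comes last. A compressed two-thread model of both sides (build with -pthread; timings are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static _Atomic int cpu_callin_map[4];
static _Atomic int cpu_online_map[4];

/* Secondary side, modelling start_secondary(): call in early, online last. */
static void *secondary(void *arg)
{
        int cpu = *(int *)arg;

        atomic_store(&cpu_callin_map[cpu], 1);  /* "I reached C code" */
        usleep(1000);                           /* build topology masks... */
        atomic_store(&cpu_online_map[cpu], 1);  /* set_cpu_online(cpu, true) */
        return NULL;
}

/* Primary side, modelling the tail of __cpu_up(). */
static int cpu_up_wait(int cpu)
{
        /* Bounded wait for callin (the kernel spins with udelay/msleep). */
        for (int c = 5000; c && !atomic_load(&cpu_callin_map[cpu]); c--)
                usleep(100);
        if (!atomic_load(&cpu_callin_map[cpu])) {
                fprintf(stderr, "Processor %d is stuck.\n", cpu);
                return -1;
        }

        /* spin_until_cond(cpu_online(cpu)) */
        while (!atomic_load(&cpu_online_map[cpu]))
                usleep(100);
        printf("Processor %d found.\n", cpu);
        return 0;
}

int main(void)
{
        pthread_t t;
        int cpu = 1;

        pthread_create(&t, NULL, secondary, &cpu);      /* "kick_cpu" */
        cpu_up_wait(cpu);
        pthread_join(t, NULL);
        return 0;
}
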
1187 int cpu_to_core_id(int cpu) in cpu_to_core_id() argument
1193 np = of_get_cpu_node(cpu, NULL); in cpu_to_core_id()
1209 int cpu_core_index_of_thread(int cpu) in cpu_core_index_of_thread() argument
1211 return cpu >> threads_shift; in cpu_core_index_of_thread()
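
cpu_core_index_of_thread() is pure arithmetic: the core index is the thread number shifted right by threads_shift. For example, with SMT8 (threads_shift = 3), threads 0-7 belong to core 0 and thread 19 to core 2:

#include <stdio.h>

static int threads_shift = 3;   /* SMT8: 8 threads per core */

static int cpu_core_index_of_thread(int cpu)
{
        return cpu >> threads_shift;
}

int main(void)
{
        printf("%d %d %d\n",
               cpu_core_index_of_thread(0),     /* 0 */
               cpu_core_index_of_thread(7),     /* 0 */
               cpu_core_index_of_thread(19));   /* 2 */
        return 0;
}
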
1224 static struct device_node *cpu_to_l2cache(int cpu) in cpu_to_l2cache() argument
1229 if (!cpu_present(cpu)) in cpu_to_l2cache()
1232 np = of_get_cpu_node(cpu, NULL); in cpu_to_l2cache()
1243 static bool update_mask_by_l2(int cpu, cpumask_var_t *mask) in update_mask_by_l2() argument
1252 l2_cache = cpu_to_l2cache(cpu); in update_mask_by_l2()
1255 for_each_cpu(i, submask_fn(cpu)) in update_mask_by_l2()
1256 set_cpus_related(cpu, i, cpu_l2_cache_mask); in update_mask_by_l2()
1261 cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu)); in update_mask_by_l2()
1264 or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask); in update_mask_by_l2()
1267 cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu)); in update_mask_by_l2()
1278 or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask); in update_mask_by_l2()
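
update_mask_by_l2() uses standard cpumask algebra to visit each L2 group only once: start from the online CPUs of this node (cpumask_and), strip CPUs already known to share the cache (cpumask_andnot), and for each remaining CPU that turns out to share the L2, OR in its whole submask and strip that from the work set. update_coregroup_mask() below follows the same and/or/andnot shape. A word-sized sketch, with a hypothetical l2_group[] table standing in for the kernel's device-tree L2 node comparison:

#include <stdio.h>

/* Word-sized stand-ins for cpumask_and/or/andnot on a small system. */
static unsigned long cpu_online_mask = 0xff;    /* cpus 0-7 online */
static unsigned long l2_group[8] = {            /* who shares each L2 */
        0x0f, 0x0f, 0x0f, 0x0f, 0xf0, 0xf0, 0xf0, 0xf0,
};
static unsigned long cpu_l2_cache_map[8];

static void update_mask_by_l2(int cpu)
{
        unsigned long mask = cpu_online_mask;   /* cpumask_and with node */

        cpu_l2_cache_map[cpu] |= 1UL << cpu;
        mask &= ~cpu_l2_cache_map[cpu];          /* cpumask_andnot */

        while (mask) {
                int i = __builtin_ctzl(mask);

                if (l2_group[i] & (1UL << cpu)) {
                        /* i shares our L2: adopt its whole group at once */
                        cpu_l2_cache_map[cpu] |= l2_group[i];
                        mask &= ~l2_group[i];
                } else {
                        mask &= ~(1UL << i);
                }
        }
}

int main(void)
{
        update_mask_by_l2(5);
        printf("cpu 5 l2 mask: %#lx\n", cpu_l2_cache_map[5]);  /* 0xf0 */
        return 0;
}
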
1292 static void remove_cpu_from_masks(int cpu) in remove_cpu_from_masks() argument
1300 for_each_cpu(i, mask_fn(cpu)) { in remove_cpu_from_masks()
1301 set_cpus_unrelated(cpu, i, cpu_l2_cache_mask); in remove_cpu_from_masks()
1302 set_cpus_unrelated(cpu, i, cpu_sibling_mask); in remove_cpu_from_masks()
1304 set_cpus_unrelated(cpu, i, cpu_smallcore_mask); in remove_cpu_from_masks()
1308 for_each_cpu(i, cpu_coregroup_mask(cpu)) in remove_cpu_from_masks()
1309 set_cpus_unrelated(cpu, i, cpu_coregroup_mask); in remove_cpu_from_masks()
1314 static inline void add_cpu_to_smallcore_masks(int cpu) in add_cpu_to_smallcore_masks() argument
1321 cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu)); in add_cpu_to_smallcore_masks()
1323 for_each_cpu(i, per_cpu(cpu_l1_cache_map, cpu)) { in add_cpu_to_smallcore_masks()
1325 set_cpus_related(i, cpu, cpu_smallcore_mask); in add_cpu_to_smallcore_masks()
1329 static void update_coregroup_mask(int cpu, cpumask_var_t *mask) in update_coregroup_mask() argument
1332 int coregroup_id = cpu_to_coregroup_id(cpu); in update_coregroup_mask()
1340 for_each_cpu(i, submask_fn(cpu)) in update_coregroup_mask()
1341 set_cpus_related(cpu, i, cpu_coregroup_mask); in update_coregroup_mask()
1346 cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu)); in update_coregroup_mask()
1349 or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask); in update_coregroup_mask()
1352 cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu)); in update_coregroup_mask()
1357 or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask); in update_coregroup_mask()
1365 static void add_cpu_to_masks(int cpu) in add_cpu_to_masks() argument
1367 int first_thread = cpu_first_thread_sibling(cpu); in add_cpu_to_masks()
1375 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); in add_cpu_to_masks()
1379 set_cpus_related(i, cpu, cpu_sibling_mask); in add_cpu_to_masks()
1381 add_cpu_to_smallcore_masks(cpu); in add_cpu_to_masks()
1384 alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu)); in add_cpu_to_masks()
1385 update_mask_by_l2(cpu, &mask); in add_cpu_to_masks()
1388 update_coregroup_mask(cpu, &mask); in add_cpu_to_masks()
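
add_cpu_to_masks() fills the levels from narrowest to widest: SMT siblings first, then the small-core/L1 map, then the L2 mask via update_mask_by_l2(), then the coregroup; its temporary mask is allocated with GFP_ATOMIC because this runs in the hotplug path. The scheduler expects each topology level to contain the one below it, which this sketch asserts over hypothetical masks for an SMT4 part:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical masks for one CPU, narrowest to widest. */
        unsigned long sibling   = 0x0000000f;   /* threads of one core  */
        unsigned long l2        = 0x000000ff;   /* cores sharing an L2  */
        unsigned long coregroup = 0x0000ffff;   /* MC-level grouping    */

        /* Each scheduler topology level must be a superset of the one
         * below it, or sched domain construction will complain. */
        assert((sibling & l2) == sibling);
        assert((l2 & coregroup) == l2);
        printf("mask nesting ok\n");
        return 0;
}
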
1396 unsigned int cpu = raw_smp_processor_id(); in start_secondary() local
1401 smp_store_cpu_info(cpu); in start_secondary()
1403 rcu_cpu_starting(cpu); in start_secondary()
1405 cpu_callin_map[cpu] = 1; in start_secondary()
1408 smp_ops->setup_cpu(cpu); in start_secondary()
1421 add_cpu_to_masks(cpu); in start_secondary()
1429 struct cpumask *mask = cpu_l2_cache_mask(cpu); in start_secondary()
1434 if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu))) in start_secondary()
1438 set_numa_node(numa_cpu_lookup_table[cpu]); in start_secondary()
1439 set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu])); in start_secondary()
1442 notify_cpu_starting(cpu); in start_secondary()
1443 set_cpu_online(cpu, true); in start_secondary()
1521 int cpu = smp_processor_id(); in __cpu_disable() local
1534 remove_cpu_from_masks(cpu); in __cpu_disable()
1539 void __cpu_die(unsigned int cpu) in __cpu_die() argument
1542 smp_ops->cpu_die(cpu); in __cpu_die()