Lines matching full:cpu in arch/powerpc/kernel/smp.c
29 #include <linux/cpu.h>
73 /* State of each CPU during hotplug phases */
123 * On big-core systems, thread_group_l1_cache_map for each CPU corresponds to
129 * On some big-core systems, thread_group_l2_cache_map for each CPU
136 * On P10, thread_group_l3_cache_map for each CPU is equal to the
150 * Returns 1 if the specified cpu should be brought up during boot.
284 void smp_muxed_ipi_set_message(int cpu, int msg) in smp_muxed_ipi_set_message() argument
286 struct cpu_messages *info = &per_cpu(ipi_message, cpu); in smp_muxed_ipi_set_message()
296 void smp_muxed_ipi_message_pass(int cpu, int msg) in smp_muxed_ipi_message_pass() argument
298 smp_muxed_ipi_set_message(cpu, msg); in smp_muxed_ipi_message_pass()
304 smp_ops->cause_ipi(cpu); in smp_muxed_ipi_message_pass()
358 static inline void do_message_pass(int cpu, int msg) in do_message_pass() argument
361 smp_ops->message_pass(cpu, msg); in do_message_pass()
364 smp_muxed_ipi_message_pass(cpu, msg); in do_message_pass()
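The muxed-IPI lines above implement a mailbox pattern: the sender marks a message in the target CPU's per-cpu word, then rings a single doorbell interrupt (smp_ops->cause_ipi); the receiver drains the whole word atomically and handles every message it collected. Below is a minimal user-space model of that pattern, not the kernel code itself: all names are invented, and the kernel stores per-message bytes in struct cpu_messages rather than bits.

#include <stdatomic.h>
#include <stdio.h>

enum { PPC_MSG_CALL_FUNCTION, PPC_MSG_RESCHEDULE,
       PPC_MSG_TICK_BROADCAST, PPC_MSG_NMI_IPI };

static _Atomic unsigned long mailbox[4];	/* one word per "CPU" */

static void muxed_ipi_set_message(int cpu, int msg)
{
	/* mark the message before the doorbell would be rung */
	atomic_fetch_or_explicit(&mailbox[cpu], 1UL << msg,
				 memory_order_release);
	/* a real implementation fires the doorbell interrupt here */
}

static void muxed_ipi_receive(int cpu)
{
	/* drain all pending messages in one atomic swap */
	unsigned long msgs = atomic_exchange_explicit(&mailbox[cpu], 0,
						      memory_order_acquire);

	for (int msg = 0; msgs; msgs >>= 1, msg++)
		if (msgs & 1)
			printf("cpu%d: message %d\n", cpu, msg);
}

int main(void)
{
	muxed_ipi_set_message(1, PPC_MSG_RESCHEDULE);
	muxed_ipi_set_message(1, PPC_MSG_CALL_FUNCTION);
	muxed_ipi_receive(1);	/* handles both pending messages at once */
	return 0;
}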
368 void smp_send_reschedule(int cpu) in smp_send_reschedule() argument
371 do_message_pass(cpu, PPC_MSG_RESCHEDULE); in smp_send_reschedule()
375 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
377 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION); in arch_send_call_function_single_ipi()
382 unsigned int cpu; in arch_send_call_function_ipi_mask() local
384 for_each_cpu(cpu, mask) in arch_send_call_function_ipi_mask()
385 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION); in arch_send_call_function_ipi_mask()
476 static void do_smp_send_nmi_ipi(int cpu, bool safe) in do_smp_send_nmi_ipi() argument
478 if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu)) in do_smp_send_nmi_ipi()
481 if (cpu >= 0) { in do_smp_send_nmi_ipi()
482 do_message_pass(cpu, PPC_MSG_NMI_IPI); in do_smp_send_nmi_ipi()
495 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
500 static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), in __smp_send_nmi_ipi() argument
507 BUG_ON(cpu == me); in __smp_send_nmi_ipi()
508 BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS); in __smp_send_nmi_ipi()
524 if (cpu < 0) { in __smp_send_nmi_ipi()
529 cpumask_set_cpu(cpu, &nmi_ipi_pending_mask); in __smp_send_nmi_ipi()
536 do_smp_send_nmi_ipi(cpu, safe); in __smp_send_nmi_ipi()
565 int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) in smp_send_nmi_ipi() argument
567 return __smp_send_nmi_ipi(cpu, fn, delay_us, false); in smp_send_nmi_ipi()
570 int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) in smp_send_safe_nmi_ipi() argument
572 return __smp_send_nmi_ipi(cpu, fn, delay_us, true); in smp_send_safe_nmi_ipi()
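__smp_send_nmi_ipi() above builds a rendezvous: targets are recorded in nmi_ipi_pending_mask, the interrupt is fired, and the caller polls until every target clears its bit or the delay_us budget runs out. A sketch of that shape, assuming a single 64-bit pending mask and stand-in delay/ack primitives; the real code also serializes concurrent callers and special-cases NMI_IPI_ALL_OTHERS, both omitted here.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t nmi_ipi_pending_mask;

/* each target runs this from its NMI handler after calling fn() */
static void nmi_ipi_ack(int cpu)
{
	atomic_fetch_and(&nmi_ipi_pending_mask, ~(1ULL << cpu));
}

static bool send_nmi_ipi(uint64_t targets, uint64_t delay_us)
{
	atomic_store(&nmi_ipi_pending_mask, targets);
	/* ...do_smp_send_nmi_ipi() would fire the interrupt here... */
	while (atomic_load(&nmi_ipi_pending_mask) != 0) {
		if (delay_us-- == 0)
			return false;	/* some target never responded */
		/* udelay(1) equivalent goes here */
	}
	return true;
}

int main(void)
{
	/* single-threaded demo: CPU 2 never acks, so the send times out */
	bool ok = send_nmi_ipi(1ULL << 2, 1000);

	nmi_ipi_ack(2);		/* what the target would do from its handler */
	return ok ? 1 : 0;
}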
579 unsigned int cpu; in tick_broadcast() local
581 for_each_cpu(cpu, mask) in tick_broadcast()
582 do_message_pass(cpu, PPC_MSG_TICK_BROADCAST); in tick_broadcast()
601 int cpu; in crash_send_ipi() local
605 for_each_present_cpu(cpu) { in crash_send_ipi()
606 if (cpu_online(cpu)) in crash_send_ipi()
617 do_smp_send_nmi_ipi(cpu, false); in crash_send_ipi()
691 * Relationships between CPUs are maintained in a set of per-cpu cpumasks so
693 * returns the cpumask for the given CPU.
711 * Extends set_cpus_related. Instead of setting one CPU at a time in
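The comment at 711 describes or_cpumasks_related(), the batched form of set_cpus_related(): rather than relating one pair of CPUs at a time, it ORs the source mask of one CPU into the destination mask of every CPU in another CPU's source mask. A bitmask model of both helpers, with uint64_t standing in for struct cpumask:

#include <stdint.h>

#define NR_CPUS 64
typedef uint64_t cpumask_t;

static void set_cpus_related(int i, int j, cpumask_t *masks)
{
	masks[i] |= 1ULL << j;
	masks[j] |= 1ULL << i;
}

/* for every CPU k in src[i], fold src[j] into dst[k] */
static void or_cpumasks_related(int i, int j,
				const cpumask_t *src, cpumask_t *dst)
{
	for (int k = 0; k < NR_CPUS; k++)
		if (src[i] & (1ULL << k))
			dst[k] |= src[j];
}

int main(void)
{
	cpumask_t sibling[NR_CPUS] = { 0 }, l2[NR_CPUS] = { 0 };

	set_cpus_related(0, 0, sibling);	/* a CPU is its own sibling */
	set_cpus_related(0, 1, sibling);	/* CPUs 0 and 1 share a core */
	or_cpumasks_related(0, 0, sibling, l2);	/* both join the L2 mask */
	return 0;
}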
734 * property for the CPU device node @dn and stores
738 * @dn: The device node of the CPU device.
743 * the CPU-device node can be grouped together based on the property.
833 * that @cpu belongs to.
835 * @cpu : The logical CPU whose thread group is being searched.
836 * @tg : The thread-group structure of the CPU node which @cpu belongs
840 * of the thread_group that @cpu belongs to.
842 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
845 static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg) in get_cpu_thread_group_start() argument
847 int hw_cpu_id = get_hard_smp_processor_id(cpu); in get_cpu_thread_group_start()
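The lookup in get_cpu_thread_group_start() works on a flattened array: thread_list[] holds nr_groups consecutive runs of threads_per_group hardware thread ids, and the function returns the index at which the run containing hw_cpu_id starts, or -1 if it is in none of them. A self-contained sketch with invented example data:

#include <stdio.h>

struct thread_groups {
	int nr_groups;
	int threads_per_group;
	int thread_list[16];
};

static int thread_group_start(int hw_cpu_id, const struct thread_groups *tg)
{
	for (int g = 0; g < tg->nr_groups; g++) {
		int group_start = g * tg->threads_per_group;

		for (int t = 0; t < tg->threads_per_group; t++)
			if (tg->thread_list[group_start + t] == hw_cpu_id)
				return group_start;
	}
	return -1;	/* hw_cpu_id belongs to no group */
}

int main(void)
{
	/* hypothetical SMT4 big core split into two 2-thread groups */
	struct thread_groups tg = { 2, 2, { 8, 10, 9, 11 } };

	printf("%d\n", thread_group_start(9, &tg));	/* prints 2 */
	return 0;
}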
864 static struct thread_groups *__init get_thread_groups(int cpu, in get_thread_groups() argument
868 struct device_node *dn = of_get_cpu_node(cpu, NULL); in get_thread_groups()
869 struct thread_groups_list *cpu_tgl = &tgl[cpu]; in get_thread_groups()
899 static int update_mask_from_threadgroup(cpumask_var_t *mask, struct thread_groups *tg, int cpu, int… in update_mask_from_threadgroup() argument
901 int first_thread = cpu_first_thread_sibling(cpu); in update_mask_from_threadgroup()
904 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu)); in update_mask_from_threadgroup()
921 static int __init init_thread_group_cache_map(int cpu, int cache_property) in init_thread_group_cache_map() argument
932 tg = get_thread_groups(cpu, cache_property, &err); in init_thread_group_cache_map()
937 cpu_group_start = get_cpu_thread_group_start(cpu, tg); in init_thread_group_cache_map()
945 mask = &per_cpu(thread_group_l1_cache_map, cpu); in init_thread_group_cache_map()
946 update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start); in init_thread_group_cache_map()
949 mask = &per_cpu(thread_group_l2_cache_map, cpu); in init_thread_group_cache_map()
950 update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start); in init_thread_group_cache_map()
951 mask = &per_cpu(thread_group_l3_cache_map, cpu); in init_thread_group_cache_map()
952 update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start); in init_thread_group_cache_map()
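Lines 945-952 apply the same operation per cache level: every thread of the CPU's core whose thread group matches the CPU's own lands in that level's per-cpu cache-sibling mask. A compressed model, where group_of() is an invented stand-in for get_cpu_thread_group_start() over the parsed ibm,thread-groups property:

#include <stdint.h>

static int group_of(int hw_thread)
{
	return hw_thread / 2;	/* toy policy: thread pairs share a cache */
}

static uint64_t cache_sibling_mask(int cpu, int first, int threads_per_core)
{
	uint64_t mask = 0;

	for (int i = first; i < first + threads_per_core; i++)
		if (group_of(i) == group_of(cpu))
			mask |= 1ULL << i;
	return mask;
}

int main(void)
{
	/* core of 4 threads at cpu 4: {4,5} and {6,7} form separate groups */
	return cache_sibling_mask(5, 4, 4) == 0x30 ? 0 : 1;
}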
990 static const struct cpumask *shared_cache_mask(int cpu) in shared_cache_mask() argument
992 return per_cpu(cpu_l2_cache_map, cpu); in shared_cache_mask()
996 static const struct cpumask *smallcore_smt_mask(int cpu) in smallcore_smt_mask() argument
998 return cpu_smallcore_mask(cpu); in smallcore_smt_mask()
1002 static struct cpumask *cpu_coregroup_mask(int cpu) in cpu_coregroup_mask() argument
1004 return per_cpu(cpu_coregroup_map, cpu); in cpu_coregroup_mask()
1012 static const struct cpumask *cpu_mc_mask(int cpu) in cpu_mc_mask() argument
1014 return cpu_coregroup_mask(cpu); in cpu_mc_mask()
1029 int cpu; in init_big_cores() local
1031 for_each_possible_cpu(cpu) { in init_big_cores()
1032 int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1); in init_big_cores()
1037 zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu), in init_big_cores()
1039 cpu_to_node(cpu)); in init_big_cores()
1044 for_each_possible_cpu(cpu) { in init_big_cores()
1045 int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3); in init_big_cores()
1060 unsigned int cpu; in smp_prepare_cpus() local
1065 * setup_cpu may need to be called on the boot cpu. We haven't in smp_prepare_cpus()
1070 /* Fixup boot cpu */ in smp_prepare_cpus()
1074 for_each_possible_cpu(cpu) { in smp_prepare_cpus()
1075 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), in smp_prepare_cpus()
1076 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
1077 zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu), in smp_prepare_cpus()
1078 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
1079 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), in smp_prepare_cpus()
1080 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
1082 zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu), in smp_prepare_cpus()
1083 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
1089 if (cpu_present(cpu)) { in smp_prepare_cpus()
1090 set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]); in smp_prepare_cpus()
1091 set_cpu_numa_mem(cpu, in smp_prepare_cpus()
1092 local_memory_node(numa_cpu_lookup_table[cpu])); in smp_prepare_cpus()
1097 /* Init the cpumasks so the boot CPU is related to itself */ in smp_prepare_cpus()
1143 unsigned int cpu = smp_processor_id(); in generic_cpu_disable() local
1145 if (cpu == boot_cpuid) in generic_cpu_disable()
1148 set_cpu_online(cpu, false); in generic_cpu_disable()
1152 /* Update affinity of all IRQs previously aimed at this CPU */ in generic_cpu_disable()
1157 * that one of the interrupts we just migrated away from this CPU is in generic_cpu_disable()
1158 * actually already pending on this CPU. If we leave it in that state in generic_cpu_disable()
1161 * be received (and EOI'ed), before we take this CPU offline. in generic_cpu_disable()
1170 void generic_cpu_die(unsigned int cpu) in generic_cpu_die() argument
1176 if (is_cpu_dead(cpu)) in generic_cpu_die()
1180 printk(KERN_ERR "CPU%d didn't die...\n", cpu); in generic_cpu_die()
1183 void generic_set_cpu_dead(unsigned int cpu) in generic_set_cpu_dead() argument
1185 per_cpu(cpu_state, cpu) = CPU_DEAD; in generic_set_cpu_dead()
1193 void generic_set_cpu_up(unsigned int cpu) in generic_set_cpu_up() argument
1195 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; in generic_set_cpu_up()
1198 int generic_check_cpu_restart(unsigned int cpu) in generic_check_cpu_restart() argument
1200 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; in generic_check_cpu_restart()
1203 int is_cpu_dead(unsigned int cpu) in is_cpu_dead() argument
1205 return per_cpu(cpu_state, cpu) == CPU_DEAD; in is_cpu_dead()
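generic_set_cpu_dead() and generic_cpu_die() above form a simple handshake over the per-cpu cpu_state variable: the dying CPU publishes CPU_DEAD, and the surviving CPU polls for it with roughly a hundred 100ms naps before complaining. A user-space model of that state machine; the array size and the sleep are stand-ins for the per-cpu variable and msleep():

#include <stdatomic.h>
#include <stdio.h>

enum cpu_state { CPU_UP_PREPARE, CPU_ONLINE, CPU_DEAD };

static _Atomic enum cpu_state cpu_state[8];

static void set_cpu_dead(int cpu)	/* runs on the dying CPU */
{
	atomic_store_explicit(&cpu_state[cpu], CPU_DEAD, memory_order_release);
}

static void wait_cpu_die(int cpu)	/* runs on the CPU tearing it down */
{
	for (int i = 0; i < 100; i++) {
		if (atomic_load_explicit(&cpu_state[cpu],
					 memory_order_acquire) == CPU_DEAD)
			return;
		/* msleep(100) equivalent goes here */
	}
	fprintf(stderr, "CPU%d didn't die...\n", cpu);
}

int main(void)
{
	set_cpu_dead(3);	/* the dying CPU's final store */
	wait_cpu_die(3);	/* the survivor sees CPU_DEAD immediately */
	return 0;
}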
1219 static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle) in cpu_idle_thread_init() argument
1222 paca_ptrs[cpu]->__current = idle; in cpu_idle_thread_init()
1223 paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) + in cpu_idle_thread_init()
1226 idle->cpu = cpu; in cpu_idle_thread_init()
1227 secondary_current = current_set[cpu] = idle; in cpu_idle_thread_init()
1230 int __cpu_up(unsigned int cpu, struct task_struct *tidle) in __cpu_up() argument
1238 cpu_thread_in_subcore(cpu)) in __cpu_up()
1242 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) in __cpu_up()
1245 cpu_idle_thread_init(cpu, tidle); in __cpu_up()
1249 * up the CPU in __cpu_up()
1252 rc = smp_ops->prepare_cpu(cpu); in __cpu_up()
1257 /* Make sure callin-map entry is 0 (can be left over from a CPU in __cpu_up()
1260 cpu_callin_map[cpu] = 0; in __cpu_up()
1269 DBG("smp: kicking cpu %d\n", cpu); in __cpu_up()
1270 rc = smp_ops->kick_cpu(cpu); in __cpu_up()
1272 pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc); in __cpu_up()
1277 * wait to see if the cpu made a callin (is actually up). in __cpu_up()
1282 for (c = 50000; c && !cpu_callin_map[cpu]; c--) in __cpu_up()
1290 for (c = 5000; c && !cpu_callin_map[cpu]; c--) in __cpu_up()
1294 if (!cpu_callin_map[cpu]) { in __cpu_up()
1295 printk(KERN_ERR "Processor %u is stuck.\n", cpu); in __cpu_up()
1299 DBG("Processor %u found.\n", cpu); in __cpu_up()
1304 /* Wait until cpu puts itself in the online & active maps */ in __cpu_up()
1305 spin_until_cond(cpu_online(cpu)); in __cpu_up()
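The two callin loops at 1282 and 1290 exist because of when they run: early in boot the kernel cannot sleep, so it busy-waits about 5 seconds in 100us steps; once the system is running it naps in 1ms steps instead. A user-space model of that structure, where the flag, map, and delays stand in for system_state, cpu_callin_map[], and udelay()/msleep():

#include <stdbool.h>
#include <unistd.h>

static volatile int cpu_callin_map[8];	/* the new CPU sets its entry to 1 */
static bool system_running;

static void udelay(int us) { usleep(us); }
static void msleep(int ms) { usleep(ms * 1000); }

static bool wait_for_callin(int cpu)
{
	int c;

	if (!system_running) {
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);	/* spin: scheduler not up yet */
	} else {
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);	/* sleeping is allowed now */
	}
	return cpu_callin_map[cpu] != 0;
}

int main(void)
{
	cpu_callin_map[1] = 1;		/* pretend the new CPU called in */
	return wait_for_callin(1) ? 0 : 1;
}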
1311 * logical cpu.
1313 int cpu_to_core_id(int cpu) in cpu_to_core_id() argument
1319 np = of_get_cpu_node(cpu, NULL); in cpu_to_core_id()
1334 /* Helper routines for cpu to core mapping */
1335 int cpu_core_index_of_thread(int cpu) in cpu_core_index_of_thread() argument
1337 return cpu >> threads_shift; in cpu_core_index_of_thread()
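The mapping in cpu_core_index_of_thread() is pure shift arithmetic: with threads_per_core a power of two, threads_shift is log2(threads_per_core) and the core index is cpu >> threads_shift. An SMT8 example:

#include <stdio.h>

int main(void)
{
	int threads_shift = 3;		/* 8 hardware threads per core */

	for (int cpu = 0; cpu <= 16; cpu += 8)
		printf("cpu %2d -> core %d\n", cpu, cpu >> threads_shift);
	/* cpu  0 -> core 0, cpu  8 -> core 1, cpu 16 -> core 2 */
	return 0;
}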
1348 * i.e. during cpu online or offline.
1350 static struct device_node *cpu_to_l2cache(int cpu) in cpu_to_l2cache() argument
1355 if (!cpu_present(cpu)) in cpu_to_l2cache()
1358 np = of_get_cpu_node(cpu, NULL); in cpu_to_l2cache()
1369 static bool update_mask_by_l2(int cpu, cpumask_var_t *mask) in update_mask_by_l2() argument
1383 cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu)); in update_mask_by_l2()
1385 for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) { in update_mask_by_l2()
1387 set_cpus_related(i, cpu, cpu_l2_cache_mask); in update_mask_by_l2()
1391 if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) && in update_mask_by_l2()
1392 !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) { in update_mask_by_l2()
1393 pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n", in update_mask_by_l2()
1394 cpu); in update_mask_by_l2()
1400 l2_cache = cpu_to_l2cache(cpu); in update_mask_by_l2()
1402 /* Assume only core siblings share cache with this CPU */ in update_mask_by_l2()
1403 for_each_cpu(i, cpu_sibling_mask(cpu)) in update_mask_by_l2()
1404 set_cpus_related(cpu, i, cpu_l2_cache_mask); in update_mask_by_l2()
1409 cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu)); in update_mask_by_l2()
1412 or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask); in update_mask_by_l2()
1414 /* Skip all CPUs already part of current CPU l2-cache mask */ in update_mask_by_l2()
1415 cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu)); in update_mask_by_l2()
1419 * when updating the masks, the current CPU has not been marked in update_mask_by_l2()
1424 /* Skip all CPUs already part of current CPU l2-cache */ in update_mask_by_l2()
1426 or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask); in update_mask_by_l2()
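The loop in update_mask_by_l2() never revisits a CPU: each time a candidate is found to share (or not share) the L2, its whole sibling group is stamped into the result or subtracted from the candidate set in one step. A bitmask sketch of that shape; same_l2() and the SMT2 sibling_mask() are invented toy rules, not the kernel's device-tree lookups.

#include <stdbool.h>
#include <stdint.h>

#define NR_CPUS 64

static bool same_l2(int a, int b)	{ return a / 8 == b / 8; }
static uint64_t sibling_mask(int cpu)	{ return 3ULL << (cpu & ~1); }

static uint64_t build_l2_mask(int cpu, uint64_t online)
{
	uint64_t l2 = sibling_mask(cpu);
	uint64_t todo = online & ~l2;	/* skip CPUs already covered */

	for (int i = 0; i < NR_CPUS; i++) {
		if (!(todo & (1ULL << i)))
			continue;
		if (same_l2(cpu, i))
			l2 |= sibling_mask(i);	/* whole core joins at once */
		todo &= ~sibling_mask(i);	/* never revisit its threads */
	}
	return l2;
}

int main(void)
{
	/* 8 online CPUs sharing one L2 all end up in CPU 0's mask */
	return build_l2_mask(0, 0xFFULL) == 0xFFULL ? 0 : 1;
}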
1440 static void remove_cpu_from_masks(int cpu) in remove_cpu_from_masks() argument
1445 unmap_cpu_from_node(cpu); in remove_cpu_from_masks()
1450 for_each_cpu(i, mask_fn(cpu)) { in remove_cpu_from_masks()
1451 set_cpus_unrelated(cpu, i, cpu_l2_cache_mask); in remove_cpu_from_masks()
1452 set_cpus_unrelated(cpu, i, cpu_sibling_mask); in remove_cpu_from_masks()
1454 set_cpus_unrelated(cpu, i, cpu_smallcore_mask); in remove_cpu_from_masks()
1457 for_each_cpu(i, cpu_core_mask(cpu)) in remove_cpu_from_masks()
1458 set_cpus_unrelated(cpu, i, cpu_core_mask); in remove_cpu_from_masks()
1461 for_each_cpu(i, cpu_coregroup_mask(cpu)) in remove_cpu_from_masks()
1462 set_cpus_unrelated(cpu, i, cpu_coregroup_mask); in remove_cpu_from_masks()
1467 static inline void add_cpu_to_smallcore_masks(int cpu) in add_cpu_to_smallcore_masks() argument
1474 cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu)); in add_cpu_to_smallcore_masks()
1476 for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) { in add_cpu_to_smallcore_masks()
1478 set_cpus_related(i, cpu, cpu_smallcore_mask); in add_cpu_to_smallcore_masks()
1482 static void update_coregroup_mask(int cpu, cpumask_var_t *mask) in update_coregroup_mask() argument
1485 int coregroup_id = cpu_to_coregroup_id(cpu); in update_coregroup_mask()
1492 /* Assume only siblings are part of this CPU's coregroup */ in update_coregroup_mask()
1493 for_each_cpu(i, submask_fn(cpu)) in update_coregroup_mask()
1494 set_cpus_related(cpu, i, cpu_coregroup_mask); in update_coregroup_mask()
1499 cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu)); in update_coregroup_mask()
1502 or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask); in update_coregroup_mask()
1505 cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu)); in update_coregroup_mask()
1510 or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask); in update_coregroup_mask()
1518 static void add_cpu_to_masks(int cpu) in add_cpu_to_masks() argument
1521 int first_thread = cpu_first_thread_sibling(cpu); in add_cpu_to_masks()
1528 * This CPU will not be in the online mask yet so we need to manually in add_cpu_to_masks()
1531 map_cpu_to_node(cpu, cpu_to_node(cpu)); in add_cpu_to_masks()
1532 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); in add_cpu_to_masks()
1533 cpumask_set_cpu(cpu, cpu_core_mask(cpu)); in add_cpu_to_masks()
1537 set_cpus_related(i, cpu, cpu_sibling_mask); in add_cpu_to_masks()
1539 add_cpu_to_smallcore_masks(cpu); in add_cpu_to_masks()
1541 /* In CPU-hotplug path, hence use GFP_ATOMIC */ in add_cpu_to_masks()
1542 ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu)); in add_cpu_to_masks()
1543 update_mask_by_l2(cpu, &mask); in add_cpu_to_masks()
1546 update_coregroup_mask(cpu, &mask); in add_cpu_to_masks()
1549 chip_id = cpu_to_chip_id(cpu); in add_cpu_to_masks()
1555 or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask); in add_cpu_to_masks()
1557 /* Skip all CPUs already part of current CPU core mask */ in add_cpu_to_masks()
1558 cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu)); in add_cpu_to_masks()
1562 cpumask_and(mask, mask, cpu_cpu_mask(cpu)); in add_cpu_to_masks()
1566 or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask); in add_cpu_to_masks()
1579 unsigned int cpu = raw_smp_processor_id(); in start_secondary() local
1588 smp_store_cpu_info(cpu); in start_secondary()
1590 rcu_cpu_starting(cpu); in start_secondary()
1591 cpu_callin_map[cpu] = 1; in start_secondary()
1594 smp_ops->setup_cpu(cpu); in start_secondary()
1606 set_numa_node(numa_cpu_lookup_table[cpu]); in start_secondary()
1607 set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu])); in start_secondary()
1609 /* Update topology CPU masks */ in start_secondary()
1610 add_cpu_to_masks(cpu); in start_secondary()
1618 struct cpumask *mask = cpu_l2_cache_mask(cpu); in start_secondary()
1623 if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu))) in start_secondary()
1628 notify_cpu_starting(cpu); in start_secondary()
1629 set_cpu_online(cpu, true); in start_secondary()
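The weight comparison near 1623 is the whole "shared caches" detection in start_secondary(): the machine has a cache domain above SMT iff some CPU's L2 mask is strictly larger than its sibling mask. popcount on bitmask stand-ins models cpumask_weight():

#include <stdbool.h>
#include <stdint.h>

static bool l2_spans_cores(uint64_t l2_mask, uint64_t smt_mask)
{
	return __builtin_popcountll(l2_mask) > __builtin_popcountll(smt_mask);
}

int main(void)
{
	/* SMT4 core whose L2 covers two cores: shared_caches would be set */
	return l2_spans_cores(0xFF, 0x0F) ? 0 : 1;
}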
1690 * We are running pinned to the boot CPU, see rest_init(). in smp_cpus_done()
1707 int cpu = smp_processor_id(); in __cpu_disable() local
1720 remove_cpu_from_masks(cpu); in __cpu_disable()
1725 void __cpu_die(unsigned int cpu) in __cpu_die() argument
1728 smp_ops->cpu_die(cpu); in __cpu_die()