Lines Matching full:cpu  (identifier search hits; the matches below appear to come from the Linux kernel's Xen PV SMP code, arch/x86/xen/smp_pv.c)

6  * very straightforward.  Bringing a CPU up is simply a matter of
11 * Because virtual CPUs can be scheduled onto any real CPU, there's no
31 #include <asm/cpu.h>
62 int cpu; in cpu_bringup() local
73 cpu = smp_processor_id(); in cpu_bringup()
74 smp_store_cpu_info(cpu); in cpu_bringup()
75 cpu_data(cpu).x86_max_cores = 1; in cpu_bringup()
76 set_cpu_sibling_map(cpu); in cpu_bringup()
82 notify_cpu_starting(cpu); in cpu_bringup()
84 set_cpu_online(cpu, true); in cpu_bringup()
86 cpu_set_state_online(cpu); /* Implies full memory barrier. */ in cpu_bringup()
98 void xen_smp_intr_free_pv(unsigned int cpu) in xen_smp_intr_free_pv() argument
100 if (per_cpu(xen_irq_work, cpu).irq >= 0) { in xen_smp_intr_free_pv()
101 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL); in xen_smp_intr_free_pv()
102 per_cpu(xen_irq_work, cpu).irq = -1; in xen_smp_intr_free_pv()
103 kfree(per_cpu(xen_irq_work, cpu).name); in xen_smp_intr_free_pv()
104 per_cpu(xen_irq_work, cpu).name = NULL; in xen_smp_intr_free_pv()
107 if (per_cpu(xen_pmu_irq, cpu).irq >= 0) { in xen_smp_intr_free_pv()
108 unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL); in xen_smp_intr_free_pv()
109 per_cpu(xen_pmu_irq, cpu).irq = -1; in xen_smp_intr_free_pv()
110 kfree(per_cpu(xen_pmu_irq, cpu).name); in xen_smp_intr_free_pv()
111 per_cpu(xen_pmu_irq, cpu).name = NULL; in xen_smp_intr_free_pv()
115 int xen_smp_intr_init_pv(unsigned int cpu) in xen_smp_intr_init_pv() argument
120 callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu); in xen_smp_intr_init_pv()
122 cpu, in xen_smp_intr_init_pv()
129 per_cpu(xen_irq_work, cpu).irq = rc; in xen_smp_intr_init_pv()
130 per_cpu(xen_irq_work, cpu).name = callfunc_name; in xen_smp_intr_init_pv()
133 pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu); in xen_smp_intr_init_pv()
134 rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu, in xen_smp_intr_init_pv()
140 per_cpu(xen_pmu_irq, cpu).irq = rc; in xen_smp_intr_init_pv()
141 per_cpu(xen_pmu_irq, cpu).name = pmu_name; in xen_smp_intr_init_pv()
147 xen_smp_intr_free_pv(cpu); in xen_smp_intr_init_pv()
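
For context on the hits above: xen_smp_intr_init_pv() builds a printable name with kasprintf(), binds a per-CPU interrupt, and records both the returned IRQ number and the name in per-CPU storage; on any failure it falls back to xen_smp_intr_free_pv() (source line 147), which unbinds, frees the name, and resets the IRQ to -1 so a repeated free is harmless. Below is a minimal user-space sketch of that acquire/record/rollback pattern; bind_stub(), intr_init(), intr_free() and the per_cpu_irq[] table are illustrative stand-ins, not kernel APIs.

	/* Sketch of the init/free pairing seen in xen_smp_intr_init_pv() /
	 * xen_smp_intr_free_pv(): record {irq, name} per CPU, and on any
	 * failure roll back through the same free routine.  bind_stub()
	 * and per_cpu_irq[] are hypothetical stand-ins, not kernel APIs.
	 */
	#define _GNU_SOURCE
	#include <stdio.h>
	#include <stdlib.h>

	#define NR_CPUS 4

	struct irq_slot {
		int irq;	/* -1 means "not bound", as in the kernel code */
		char *name;
	};

	static struct irq_slot per_cpu_irq[NR_CPUS];

	/* Pretend to bind an interrupt; fail for odd CPUs to show rollback. */
	static int bind_stub(unsigned int cpu)
	{
		return (cpu & 1) ? -1 : (int)(100 + cpu);
	}

	static void intr_free(unsigned int cpu)
	{
		if (per_cpu_irq[cpu].irq >= 0) {
			/* the unbind call would go here */
			per_cpu_irq[cpu].irq = -1;
			free(per_cpu_irq[cpu].name);
			per_cpu_irq[cpu].name = NULL;
		}
	}

	static int intr_init(unsigned int cpu)
	{
		char *name = NULL;
		int rc;

		if (asprintf(&name, "irqwork%u", cpu) < 0)
			return -1;

		rc = bind_stub(cpu);
		if (rc < 0) {
			free(name);
			goto fail;
		}
		per_cpu_irq[cpu].irq = rc;
		per_cpu_irq[cpu].name = name;
		return 0;

	fail:
		intr_free(cpu);	/* harmless: irq is still -1 if bind failed */
		return -1;
	}

	int main(void)
	{
		for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
			per_cpu_irq[cpu].irq = -1;	/* mirror the kernel's -1 init */
			printf("cpu%u: init %s\n", cpu,
			       intr_init(cpu) == 0 ? "ok" : "failed, rolled back");
		}
		for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
			intr_free(cpu);
		return 0;
	}
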
176 * normally is not a problem, except when CPU hotplugging in _get_smp_config()
193 /* We've switched to the "real" per-cpu gdt, so make in xen_pv_smp_prepare_boot_cpu()
210 unsigned cpu; in xen_pv_smp_prepare_cpus() local
241 for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--) in xen_pv_smp_prepare_cpus()
243 set_cpu_possible(cpu, false); in xen_pv_smp_prepare_cpus()
246 for_each_possible_cpu(cpu) in xen_pv_smp_prepare_cpus()
247 set_cpu_present(cpu, true); in xen_pv_smp_prepare_cpus()
251 cpu_initialize_context(unsigned int cpu, struct task_struct *idle) in cpu_initialize_context() argument
258 cpumask_set_cpu(cpu, cpu_callout_mask); in cpu_initialize_context()
259 if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map)) in cpu_initialize_context()
264 cpumask_clear_cpu(cpu, xen_cpu_initialized_map); in cpu_initialize_context()
265 cpumask_clear_cpu(cpu, cpu_callout_mask); in cpu_initialize_context()
269 gdt = get_cpu_gdt_rw(cpu); in cpu_initialize_context()
272 * Bring up the CPU in cpu_bringup_and_idle() with the stack in cpu_initialize_context()
304 ctxt->gs_base_kernel = per_cpu_offset(cpu); in cpu_initialize_context()
309 per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir); in cpu_initialize_context()
312 if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt)) in cpu_initialize_context()
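
The cpu_initialize_context() hits show a one-shot guard: cpu_callout_mask and xen_cpu_initialized_map are set (cpumask_test_and_set_cpu() makes the claim atomic and returns early for an already-initialized CPU), the bits are cleared again if an intervening step fails (source lines 264-265), and only then is the context handed to the hypervisor via VCPUOP_initialise. A small sketch of that claim/undo idiom, assuming hypothetical initialized[] and do_initialise() helpers:

	/* Model of the one-shot guard in cpu_initialize_context():
	 * atomically test-and-set a per-CPU "initialized" bit, skip the
	 * work if it was already set, and clear the bit again if the
	 * initialization fails.  All names here are illustrative only.
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define NR_CPUS 4

	static atomic_bool initialized[NR_CPUS];

	static int do_initialise(unsigned int cpu)
	{
		/* stand-in for building the context and handing it to the
		 * hypervisor; fail for cpu 3 to demonstrate the undo path */
		return cpu == 3 ? -1 : 0;
	}

	static int initialize_context(unsigned int cpu)
	{
		/* exchange returns the previous value: true means the CPU
		 * was already claimed, so there is nothing left to do */
		if (atomic_exchange(&initialized[cpu], true))
			return 0;

		if (do_initialise(cpu)) {
			atomic_store(&initialized[cpu], false);	/* undo the claim */
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
			printf("cpu%u: %d\n", cpu, initialize_context(cpu));
		/* second pass: already-initialized CPUs are skipped */
		for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
			printf("cpu%u again: %d\n", cpu, initialize_context(cpu));
		return 0;
	}
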
319 static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle) in xen_pv_cpu_up() argument
323 rc = common_cpu_up(cpu, idle); in xen_pv_cpu_up()
327 xen_setup_runstate_info(cpu); in xen_pv_cpu_up()
333 rc = cpu_check_up_prepare(cpu); in xen_pv_cpu_up()
338 per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1; in xen_pv_cpu_up()
340 rc = cpu_initialize_context(cpu, idle); in xen_pv_cpu_up()
344 xen_pmu_init(cpu); in xen_pv_cpu_up()
346 rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL); in xen_pv_cpu_up()
349 while (cpu_report_state(cpu) != CPU_ONLINE) in xen_pv_cpu_up()
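
The bring-up handshake is split across two of the functions matched here: the new vCPU publishes its state from cpu_bringup() via set_cpu_online() and cpu_set_state_online() (the latter commented as implying a full memory barrier), while xen_pv_cpu_up() issues VCPUOP_up and then spins until cpu_report_state() returns CPU_ONLINE (source line 349). A two-thread model of that publish-and-wait pattern, with illustrative names rather than kernel APIs:

	/* Two-thread model of the bring-up handshake: the "new CPU"
	 * publishes CPU_ONLINE with sequentially consistent ordering
	 * (standing in for the full barrier noted at
	 * cpu_set_state_online()), and the "boot CPU" spins until it
	 * observes that state, as xen_pv_cpu_up() does.  All names are
	 * illustrative, not kernel APIs.  Build with -pthread.
	 */
	#include <pthread.h>
	#include <sched.h>
	#include <stdatomic.h>
	#include <stdio.h>

	enum { CPU_DEAD, CPU_BOOTING, CPU_ONLINE };

	static _Atomic int cpu_state = CPU_DEAD;
	static int per_cpu_data;		/* written before going online */

	static void *new_cpu(void *arg)
	{
		(void)arg;
		per_cpu_data = 42;			/* e.g. smp_store_cpu_info() */
		atomic_store(&cpu_state, CPU_ONLINE);	/* publish with full ordering */
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		atomic_store(&cpu_state, CPU_BOOTING);	/* "VCPUOP_up" issued */
		pthread_create(&t, NULL, new_cpu, NULL);

		while (atomic_load(&cpu_state) != CPU_ONLINE)
			sched_yield();			/* waiter side of the handshake */

		/* the ordering guarantees this read sees the earlier store */
		printf("online, per_cpu_data=%d\n", per_cpu_data);
		pthread_join(t, NULL);
		return 0;
	}
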
358 unsigned int cpu = smp_processor_id(); in xen_pv_cpu_disable() local
359 if (cpu == 0) in xen_pv_cpu_disable()
368 static void xen_pv_cpu_die(unsigned int cpu) in xen_pv_cpu_die() argument
371 xen_vcpu_nr(cpu), NULL)) { in xen_pv_cpu_die()
376 if (common_cpu_die(cpu) == 0) { in xen_pv_cpu_die()
377 xen_smp_intr_free(cpu); in xen_pv_cpu_die()
378 xen_uninit_lock_cpu(cpu); in xen_pv_cpu_die()
379 xen_teardown_timer(cpu); in xen_pv_cpu_die()
380 xen_pmu_finish(cpu); in xen_pv_cpu_die()
390 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down) in xen_pv_play_dead()
407 static void xen_pv_cpu_die(unsigned int cpu) in xen_pv_cpu_die() argument
420 int cpu = smp_processor_id(); in stop_self() local
426 set_cpu_online(cpu, false); in stop_self()
428 HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL); in stop_self()