/*
 * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
 */
4
5 #include <linux/interrupt.h>
6 #include <linux/kernel.h>
7 #include <linux/export.h>
8 #include <linux/smp.h>
9 #include <linux/hypervisor.h>
10
/*
 * On UP there is only CPU 0, so run @func right here, with interrupts
 * disabled to mimic the SMP calling convention.  @wait is meaningless
 * as the call is always synchronous on UP.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned long irqflags;

	/* Any CPU other than 0 cannot exist here - flag buggy callers. */
	WARN_ON(cpu != 0);

	local_irq_save(irqflags);
	func(info);
	local_irq_restore(irqflags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
25
/*
 * UP variant of the async single call: there is nothing to queue,
 * so invoke the csd callback immediately with interrupts disabled.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	csd->func(csd->info);
	local_irq_restore(irqflags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single_async);
36
/*
 * With a single CPU, "each cpu" is just the local one: call @func
 * once, interrupts off, and report success.
 */
int on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	func(info);
	local_irq_restore(irqflags);

	return 0;
}
EXPORT_SYMBOL(on_each_cpu);
47
/*
 * Note we still need to test the mask even for UP
 * because we actually can get an empty mask from
 * code that on SMP might call us without the local
 * CPU in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask,
		      smp_call_func_t func, void *info, bool wait)
{
	unsigned long irqflags;

	/* Callers may legitimately pass a mask that excludes CPU 0. */
	if (!cpumask_test_cpu(0, mask))
		return;

	local_irq_save(irqflags);
	func(info);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(on_each_cpu_mask);
66
/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags)
{
	unsigned long irqflags;

	preempt_disable();
	/* Only CPU 0 exists; skip the call entirely if the predicate says no. */
	if (!cond_func(0, info))
		goto out;

	local_irq_save(irqflags);
	func(info);
	local_irq_restore(irqflags);
out:
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond);
86
/*
 * Run @func on the given CPU.  On UP only CPU 0 exists, so any other
 * cpu is an error (-ENXIO).  When @phys is set, pin to physical vCPU 0
 * around the call and unpin afterwards.
 */
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	int ret;

	if (cpu)
		return -ENXIO;

	if (phys)
		hypervisor_pin_vcpu(0);

	ret = func(par);

	if (phys)
		hypervisor_pin_vcpu(-1);

	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
103