/*
 * HyperV Detection code.
 *
 * Copyright (C) 2010, Novell, Inc.
 * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 */

#include <linux/types.h>
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/desc.h>
#include <asm/irq_regs.h>
#include <asm/i8259.h>
#include <asm/apic.h>
#include <asm/timer.h>
#include <asm/reboot.h>
#include <asm/nmi.h>

struct ms_hyperv_info ms_hyperv;
EXPORT_SYMBOL_GPL(ms_hyperv);

#if IS_ENABLED(CONFIG_HYPERV)
static void (*vmbus_handler)(void);
static void (*hv_stimer0_handler)(void);
static void (*hv_kexec_handler)(void);
static void (*hv_crash_handler)(struct pt_regs *regs);

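/*
 * C handler for the hypervisor callback (VMBus) interrupt: bump the
 * per-CPU interrupt count and dispatch to the handler registered by the
 * VMBus driver via hv_setup_vmbus_irq().
 */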
__visible void __irq_entry hyperv_vector_handler(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_irq();
	inc_irq_stat(irq_hv_callback_count);
	if (vmbus_handler)
		vmbus_handler();

	if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
		ack_APIC_irq();

	exiting_irq();
	set_irq_regs(old_regs);
}

void hv_setup_vmbus_irq(void (*handler)(void))
{
	vmbus_handler = handler;
}

void hv_remove_vmbus_irq(void)
{
	/* We have no way to deallocate the interrupt gate */
	vmbus_handler = NULL;
}
EXPORT_SYMBOL_GPL(hv_setup_vmbus_irq);
EXPORT_SYMBOL_GPL(hv_remove_vmbus_irq);

/*
 * Routines to do per-architecture handling of stimer0
 * interrupts when in Direct Mode
 */

__visible void __irq_entry hv_stimer0_vector_handler(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_irq();
	inc_irq_stat(hyperv_stimer0_count);
	if (hv_stimer0_handler)
		hv_stimer0_handler();
	ack_APIC_irq();

	exiting_irq();
	set_irq_regs(old_regs);
}

int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void))
{
	*vector = HYPERV_STIMER0_VECTOR;
	*irq = 0;   /* Unused on x86/x64 */
	hv_stimer0_handler = handler;
	return 0;
}
EXPORT_SYMBOL_GPL(hv_setup_stimer0_irq);

void hv_remove_stimer0_irq(int irq)
{
	/* We have no way to deallocate the interrupt gate */
	hv_stimer0_handler = NULL;
}
EXPORT_SYMBOL_GPL(hv_remove_stimer0_irq);

void hv_setup_kexec_handler(void (*handler)(void))
{
	hv_kexec_handler = handler;
}
EXPORT_SYMBOL_GPL(hv_setup_kexec_handler);

void hv_remove_kexec_handler(void)
{
	hv_kexec_handler = NULL;
}
EXPORT_SYMBOL_GPL(hv_remove_kexec_handler);

void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs))
{
	hv_crash_handler = handler;
}
EXPORT_SYMBOL_GPL(hv_setup_crash_handler);

void hv_remove_crash_handler(void)
{
	hv_crash_handler = NULL;
}
EXPORT_SYMBOL_GPL(hv_remove_crash_handler);

#ifdef CONFIG_KEXEC_CORE
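/*
 * Give the handlers registered by the VMBus driver a chance to clean up
 * before the native shutdown/crash paths run.
 */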
static void hv_machine_shutdown(void)
{
	if (kexec_in_progress && hv_kexec_handler)
		hv_kexec_handler();
	native_machine_shutdown();
}

static void hv_machine_crash_shutdown(struct pt_regs *regs)
{
	if (hv_crash_handler)
		hv_crash_handler(regs);
	native_machine_crash_shutdown(regs);
}
#endif /* CONFIG_KEXEC_CORE */
#endif /* CONFIG_HYPERV */

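/*
 * Detect Hyper-V through its CPUID vendor signature. Returns the base
 * CPUID leaf on a match, or 0 when not running on Hyper-V.
 */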
static uint32_t __init ms_hyperv_platform(void)
{
	u32 eax;
	u32 hyp_signature[3];

	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return 0;

	cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
	      &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);

	if (eax >= HYPERV_CPUID_MIN &&
	    eax <= HYPERV_CPUID_MAX &&
	    !memcmp("Microsoft Hv", hyp_signature, 12))
		return HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;

	return 0;
}

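/*
 * Generation 2 instances have no PC-style port 0x61, so always report
 * "no NMI reason" (see the EFI_BOOT check in ms_hyperv_init_platform()).
 */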
static unsigned char hv_get_nmi_reason(void)
{
	return 0;
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Prior to WS2016, Debug-VM sends NMIs to all CPUs, which makes it
 * difficult to process CHANNELMSG_UNLOAD in case of a crash. Handle
 * an unknown NMI on the first CPU that gets it.
 */
static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
{
	static atomic_t nmi_cpu = ATOMIC_INIT(-1);

	if (!unknown_nmi_panic)
		return NMI_DONE;

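	/*
	 * The first CPU to claim the NMI falls through to the normal
	 * unknown-NMI (panic) path; every other CPU swallows it.
	 */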
	if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
		return NMI_HANDLED;

	return NMI_DONE;
}
#endif

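/* Read the TSC frequency (in Hz) from the synthetic MSR and convert to kHz. */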
static unsigned long hv_get_tsc_khz(void)
{
	unsigned long freq;

	rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);

	return freq / 1000;
}

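/*
 * One-time platform setup, run once Hyper-V has been detected: read the
 * feature/hint CPUID leaves and wire up the Hyper-V specific platform hooks.
 */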
static void __init ms_hyperv_init_platform(void)
{
	int hv_host_info_eax;
	int hv_host_info_ebx;
	int hv_host_info_ecx;
	int hv_host_info_edx;

	/*
	 * Extract the features and hints
	 */
	ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
	ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
	ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);

	pr_info("Hyper-V: features 0x%x, hints 0x%x\n",
		ms_hyperv.features, ms_hyperv.hints);

	ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS);
	ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS);

	pr_debug("Hyper-V: max %u virtual processors, %u logical processors\n",
		 ms_hyperv.max_vp_index, ms_hyperv.max_lp_index);

	/*
	 * Extract host information.
	 */
	if (cpuid_eax(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS) >=
	    HYPERV_CPUID_VERSION) {
		hv_host_info_eax = cpuid_eax(HYPERV_CPUID_VERSION);
		hv_host_info_ebx = cpuid_ebx(HYPERV_CPUID_VERSION);
		hv_host_info_ecx = cpuid_ecx(HYPERV_CPUID_VERSION);
		hv_host_info_edx = cpuid_edx(HYPERV_CPUID_VERSION);

		pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d\n",
			hv_host_info_eax, hv_host_info_ebx >> 16,
			hv_host_info_ebx & 0xFFFF, hv_host_info_ecx,
			hv_host_info_edx >> 24, hv_host_info_edx & 0xFFFFFF);
	}

	if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
	    ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
		x86_platform.calibrate_tsc = hv_get_tsc_khz;
		x86_platform.calibrate_cpu = hv_get_tsc_khz;
	}

	if (ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED) {
		ms_hyperv.nested_features =
			cpuid_eax(HYPERV_CPUID_NESTED_FEATURES);
	}

#ifdef CONFIG_X86_LOCAL_APIC
	if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
	    ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
		/*
		 * Get the APIC frequency.
		 */
		u64 hv_lapic_frequency;

		rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
		hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
		lapic_timer_frequency = hv_lapic_frequency;
		pr_info("Hyper-V: LAPIC Timer Frequency: %#x\n",
			lapic_timer_frequency);
	}

	register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
			     "hv_nmi_unknown");
#endif

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif

#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
	machine_ops.shutdown = hv_machine_shutdown;
	machine_ops.crash_shutdown = hv_machine_crash_shutdown;
#endif
	mark_tsc_unstable("running on Hyper-V");

	/*
	 * Generation 2 instances don't support reading the NMI status from
	 * 0x61 port.
	 */
	if (efi_enabled(EFI_BOOT))
		x86_platform.get_nmi_reason = hv_get_nmi_reason;

#if IS_ENABLED(CONFIG_HYPERV)
	/*
	 * Setup the hook to get control post apic initialization.
	 */
	x86_platform.apic_post_init = hyperv_init;
	hyperv_setup_mmu_ops();
	/* Setup the IDT for hypervisor callback */
	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);

	/* Setup the IDT for reenlightenment notifications */
	if (ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT)
		alloc_intr_gate(HYPERV_REENLIGHTENMENT_VECTOR,
				hyperv_reenlightenment_vector);

	/* Setup the IDT for stimer0 */
	if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE)
		alloc_intr_gate(HYPERV_STIMER0_VECTOR,
				hv_stimer0_callback_vector);
#endif
}

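/* Detection and init callbacks used by the generic x86 hypervisor framework. */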
const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
	.name			= "Microsoft Hyper-V",
	.detect			= ms_hyperv_platform,
	.type			= X86_HYPER_MS_HYPERV,
	.init.init_platform	= ms_hyperv_init_platform,
};