// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#include "event-parse.h"
#include "trace-seq.h"

#ifdef HAVE_UDIS86

#include <udis86.h>

static ud_t ud;

static void init_disassembler(void)
{
	ud_init(&ud);
	ud_set_syntax(&ud, UD_SYN_ATT);
}

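/*
 * Disassemble the instruction bytes with udis86.  The CPU mode is
 * derived from the guest state flags: real mode and vm86 mode decode
 * as 16-bit, long mode as 64-bit, and a protected-mode code segment
 * with CS.D set as 32-bit.
 */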
static const char *disassemble(unsigned char *insn, int len, uint64_t rip,
			       int cr0_pe, int eflags_vm,
			       int cs_d, int cs_l)
{
	int mode;

	if (!cr0_pe)
		mode = 16;
	else if (eflags_vm)
		mode = 16;
	else if (cs_l)
		mode = 64;
	else if (cs_d)
		mode = 32;
	else
		mode = 16;

	ud_set_pc(&ud, rip);
	ud_set_mode(&ud, mode);
	ud_set_input_buffer(&ud, insn, len);
	ud_disassemble(&ud);
	return ud_insn_asm(&ud);
}

#else

static void init_disassembler(void)
{
}

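/*
 * Without udis86 we cannot disassemble, so fall back to printing the
 * raw instruction bytes as a hex string (the buffer is sized for the
 * 15-byte x86 instruction length limit).
 */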
static const char *disassemble(unsigned char *insn, int len, uint64_t rip,
			       int cr0_pe, int eflags_vm,
			       int cs_d, int cs_l)
{
	static char out[15*3+1];
	int i;

	for (i = 0; i < len; ++i)
		sprintf(out + i * 3, "%02x ", insn[i]);
	out[len*3-1] = '\0';
	return out;
}

#endif


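/*
 * VMX exit reason codes reported by Intel hosts in the kvm_exit
 * tracepoint, kept as an _ER() list so the same table can be expanded
 * into name/value pairs below.
 */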
#define VMX_EXIT_REASONS			\
	_ER(EXCEPTION_NMI,	 0)		\
	_ER(EXTERNAL_INTERRUPT,	 1)		\
	_ER(TRIPLE_FAULT,	 2)		\
	_ER(PENDING_INTERRUPT,	 7)		\
	_ER(NMI_WINDOW,		 8)		\
	_ER(TASK_SWITCH,	 9)		\
	_ER(CPUID,		 10)		\
	_ER(HLT,		 12)		\
	_ER(INVD,		 13)		\
	_ER(INVLPG,		 14)		\
	_ER(RDPMC,		 15)		\
	_ER(RDTSC,		 16)		\
	_ER(VMCALL,		 18)		\
	_ER(VMCLEAR,		 19)		\
	_ER(VMLAUNCH,		 20)		\
	_ER(VMPTRLD,		 21)		\
	_ER(VMPTRST,		 22)		\
	_ER(VMREAD,		 23)		\
	_ER(VMRESUME,		 24)		\
	_ER(VMWRITE,		 25)		\
	_ER(VMOFF,		 26)		\
	_ER(VMON,		 27)		\
	_ER(CR_ACCESS,		 28)		\
	_ER(DR_ACCESS,		 29)		\
	_ER(IO_INSTRUCTION,	 30)		\
	_ER(MSR_READ,		 31)		\
	_ER(MSR_WRITE,		 32)		\
	_ER(MWAIT_INSTRUCTION,	 36)		\
	_ER(MONITOR_INSTRUCTION, 39)		\
	_ER(PAUSE_INSTRUCTION,	 40)		\
	_ER(MCE_DURING_VMENTRY,	 41)		\
	_ER(TPR_BELOW_THRESHOLD, 43)		\
	_ER(APIC_ACCESS,	 44)		\
	_ER(EOI_INDUCED,	 45)		\
	_ER(EPT_VIOLATION,	 48)		\
	_ER(EPT_MISCONFIG,	 49)		\
	_ER(INVEPT,		 50)		\
	_ER(PREEMPTION_TIMER,	 52)		\
	_ER(WBINVD,		 54)		\
	_ER(XSETBV,		 55)		\
	_ER(APIC_WRITE,		 56)		\
	_ER(INVPCID,		 58)		\
	_ER(PML_FULL,		 62)		\
	_ER(XSAVES,		 63)		\
	_ER(XRSTORS,		 64)

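/*
 * SVM exit codes reported by AMD hosts, in the same _ER() list format
 * as the VMX table above.
 */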
#define SVM_EXIT_REASONS \
	_ER(EXIT_READ_CR0,	0x000)		\
	_ER(EXIT_READ_CR3,	0x003)		\
	_ER(EXIT_READ_CR4,	0x004)		\
	_ER(EXIT_READ_CR8,	0x008)		\
	_ER(EXIT_WRITE_CR0,	0x010)		\
	_ER(EXIT_WRITE_CR3,	0x013)		\
	_ER(EXIT_WRITE_CR4,	0x014)		\
	_ER(EXIT_WRITE_CR8,	0x018)		\
	_ER(EXIT_READ_DR0,	0x020)		\
	_ER(EXIT_READ_DR1,	0x021)		\
	_ER(EXIT_READ_DR2,	0x022)		\
	_ER(EXIT_READ_DR3,	0x023)		\
	_ER(EXIT_READ_DR4,	0x024)		\
	_ER(EXIT_READ_DR5,	0x025)		\
	_ER(EXIT_READ_DR6,	0x026)		\
	_ER(EXIT_READ_DR7,	0x027)		\
	_ER(EXIT_WRITE_DR0,	0x030)		\
	_ER(EXIT_WRITE_DR1,	0x031)		\
	_ER(EXIT_WRITE_DR2,	0x032)		\
	_ER(EXIT_WRITE_DR3,	0x033)		\
	_ER(EXIT_WRITE_DR4,	0x034)		\
	_ER(EXIT_WRITE_DR5,	0x035)		\
	_ER(EXIT_WRITE_DR6,	0x036)		\
	_ER(EXIT_WRITE_DR7,	0x037)		\
	_ER(EXIT_EXCP_DE,	0x040)		\
	_ER(EXIT_EXCP_DB,	0x041)		\
	_ER(EXIT_EXCP_BP,	0x043)		\
	_ER(EXIT_EXCP_OF,	0x044)		\
	_ER(EXIT_EXCP_BR,	0x045)		\
	_ER(EXIT_EXCP_UD,	0x046)		\
	_ER(EXIT_EXCP_NM,	0x047)		\
	_ER(EXIT_EXCP_DF,	0x048)		\
	_ER(EXIT_EXCP_TS,	0x04a)		\
	_ER(EXIT_EXCP_NP,	0x04b)		\
	_ER(EXIT_EXCP_SS,	0x04c)		\
	_ER(EXIT_EXCP_GP,	0x04d)		\
	_ER(EXIT_EXCP_PF,	0x04e)		\
	_ER(EXIT_EXCP_MF,	0x050)		\
	_ER(EXIT_EXCP_AC,	0x051)		\
	_ER(EXIT_EXCP_MC,	0x052)		\
	_ER(EXIT_EXCP_XF,	0x053)		\
	_ER(EXIT_INTR,		0x060)		\
	_ER(EXIT_NMI,		0x061)		\
	_ER(EXIT_SMI,		0x062)		\
	_ER(EXIT_INIT,		0x063)		\
	_ER(EXIT_VINTR,		0x064)		\
	_ER(EXIT_CR0_SEL_WRITE,	0x065)		\
	_ER(EXIT_IDTR_READ,	0x066)		\
	_ER(EXIT_GDTR_READ,	0x067)		\
	_ER(EXIT_LDTR_READ,	0x068)		\
	_ER(EXIT_TR_READ,	0x069)		\
	_ER(EXIT_IDTR_WRITE,	0x06a)		\
	_ER(EXIT_GDTR_WRITE,	0x06b)		\
	_ER(EXIT_LDTR_WRITE,	0x06c)		\
	_ER(EXIT_TR_WRITE,	0x06d)		\
	_ER(EXIT_RDTSC,		0x06e)		\
	_ER(EXIT_RDPMC,		0x06f)		\
	_ER(EXIT_PUSHF,		0x070)		\
	_ER(EXIT_POPF,		0x071)		\
	_ER(EXIT_CPUID,		0x072)		\
	_ER(EXIT_RSM,		0x073)		\
	_ER(EXIT_IRET,		0x074)		\
	_ER(EXIT_SWINT,		0x075)		\
	_ER(EXIT_INVD,		0x076)		\
	_ER(EXIT_PAUSE,		0x077)		\
	_ER(EXIT_HLT,		0x078)		\
	_ER(EXIT_INVLPG,	0x079)		\
	_ER(EXIT_INVLPGA,	0x07a)		\
	_ER(EXIT_IOIO,		0x07b)		\
	_ER(EXIT_MSR,		0x07c)		\
	_ER(EXIT_TASK_SWITCH,	0x07d)		\
	_ER(EXIT_FERR_FREEZE,	0x07e)		\
	_ER(EXIT_SHUTDOWN,	0x07f)		\
	_ER(EXIT_VMRUN,		0x080)		\
	_ER(EXIT_VMMCALL,	0x081)		\
	_ER(EXIT_VMLOAD,	0x082)		\
	_ER(EXIT_VMSAVE,	0x083)		\
	_ER(EXIT_STGI,		0x084)		\
	_ER(EXIT_CLGI,		0x085)		\
	_ER(EXIT_SKINIT,	0x086)		\
	_ER(EXIT_RDTSCP,	0x087)		\
	_ER(EXIT_ICEBP,		0x088)		\
	_ER(EXIT_WBINVD,	0x089)		\
	_ER(EXIT_MONITOR,	0x08a)		\
	_ER(EXIT_MWAIT,		0x08b)		\
	_ER(EXIT_MWAIT_COND,	0x08c)		\
	_ER(EXIT_XSETBV,	0x08d)		\
	_ER(EXIT_NPF,		0x400)		\
	_ER(EXIT_AVIC_INCOMPLETE_IPI,		0x401)	\
	_ER(EXIT_AVIC_UNACCELERATED_ACCESS,	0x402)	\
	_ER(EXIT_ERR,		-1)

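/* Expand each _ER(reason, val) entry into a { "reason", val } pair. */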
#define _ER(reason, val)	{ #reason, val },
struct str_values {
	const char	*str;
	int		val;
};

static struct str_values vmx_exit_reasons[] = {
	VMX_EXIT_REASONS
	{ NULL, -1}
};

static struct str_values svm_exit_reasons[] = {
	SVM_EXIT_REASONS
	{ NULL, -1}
};

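/*
 * Map the tracepoint's "isa" field to the right exit reason table:
 * isa 1 is VMX (Intel) and isa 2 is SVM (AMD), matching the kernel's
 * KVM_ISA_VMX and KVM_ISA_SVM values.
 */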
static struct isa_exit_reasons {
	unsigned isa;
	struct str_values *strings;
} isa_exit_reasons[] = {
	{ .isa = 1, .strings = vmx_exit_reasons },
	{ .isa = 2, .strings = svm_exit_reasons },
	{ }
};

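/*
 * Look up the name of an exit reason for the given ISA.  Returns
 * "UNKNOWN-ISA" for an unrecognized ISA, and NULL when the value is
 * not in the table (the terminating entry's str is NULL).
 */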
static const char *find_exit_reason(unsigned isa, int val)
{
	struct str_values *strings = NULL;
	int i;

	for (i = 0; isa_exit_reasons[i].strings; ++i)
		if (isa_exit_reasons[i].isa == isa) {
			strings = isa_exit_reasons[i].strings;
			break;
		}
	if (!strings)
		return "UNKNOWN-ISA";
	for (i = 0; strings[i].str; i++)
		if (strings[i].val == val)
			break;

	return strings[i].str;
}

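/*
 * Print the exit reason taken from the named field.  If the record
 * has no "isa" field, default to VMX.
 */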
static int print_exit_reason(struct trace_seq *s, struct tep_record *record,
			     struct tep_event *event, const char *field)
{
	unsigned long long isa;
	unsigned long long val;
	const char *reason;

	if (tep_get_field_val(s, event, field, record, &val, 1) < 0)
		return -1;

	if (tep_get_field_val(s, event, "isa", record, &isa, 0) < 0)
		isa = 1;

	reason = find_exit_reason(isa, val);
	if (reason)
		trace_seq_printf(s, "reason %s", reason);
	else
		trace_seq_printf(s, "reason UNKNOWN (%llu)", val);
	return 0;
}

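/*
 * kvm:kvm_exit - print the decoded exit reason, the guest rip and,
 * when present, the two exit info qualification words.
 */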
static int kvm_exit_handler(struct trace_seq *s, struct tep_record *record,
			    struct tep_event *event, void *context)
{
	unsigned long long info1 = 0, info2 = 0;

	if (print_exit_reason(s, record, event, "exit_reason") < 0)
		return -1;

	tep_print_num_field(s, " rip 0x%lx", event, "guest_rip", record, 1);

	if (tep_get_field_val(s, event, "info1", record, &info1, 0) >= 0
	    && tep_get_field_val(s, event, "info2", record, &info2, 0) >= 0)
		trace_seq_printf(s, " info %llx %llx", info1, info2);

	return 0;
}

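/*
 * Flag bits carried in the kvm_emulate_insn event's "flags" field,
 * describing the guest CPU mode at the time of emulation.
 */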
#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
#define KVM_EMUL_INSN_F_CS_L   (1 << 3)

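/*
 * kvm:kvm_emulate_insn - print "csbase:rip: <disassembly>", appending
 * " FAIL" when the emulation failed.
 */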
static int kvm_emulate_insn_handler(struct trace_seq *s,
				    struct tep_record *record,
				    struct tep_event *event, void *context)
{
	unsigned long long rip, csbase, len, flags, failed;
	int llen;
	uint8_t *insn;
	const char *disasm;

	if (tep_get_field_val(s, event, "rip", record, &rip, 1) < 0)
		return -1;

	if (tep_get_field_val(s, event, "csbase", record, &csbase, 1) < 0)
		return -1;

	if (tep_get_field_val(s, event, "len", record, &len, 1) < 0)
		return -1;

	if (tep_get_field_val(s, event, "flags", record, &flags, 1) < 0)
		return -1;

	if (tep_get_field_val(s, event, "failed", record, &failed, 1) < 0)
		return -1;

	insn = tep_get_field_raw(s, event, "insn", record, &llen, 1);
	if (!insn)
		return -1;

	disasm = disassemble(insn, len, rip,
			     flags & KVM_EMUL_INSN_F_CR0_PE,
			     flags & KVM_EMUL_INSN_F_EFL_VM,
			     flags & KVM_EMUL_INSN_F_CS_D,
			     flags & KVM_EMUL_INSN_F_CS_L);

	trace_seq_printf(s, "%llx:%llx: %s%s", csbase, rip, disasm,
			 failed ? " FAIL" : "");
	return 0;
}


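/*
 * kvm:kvm_nested_vmexit_inject - print the nested exit code plus its
 * exit info and interrupt info fields.  Also reused as the tail of
 * the kvm_nested_vmexit handler below.
 */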
static int kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct tep_record *record,
					    struct tep_event *event, void *context)
{
	if (print_exit_reason(s, record, event, "exit_code") < 0)
		return -1;

	tep_print_num_field(s, " info1 %llx", event, "exit_info1", record, 1);
	tep_print_num_field(s, " info2 %llx", event, "exit_info2", record, 1);
	tep_print_num_field(s, " int_info %llx", event, "exit_int_info", record, 1);
	tep_print_num_field(s, " int_info_err %llx", event, "exit_int_info_err", record, 1);

	return 0;
}

static int kvm_nested_vmexit_handler(struct trace_seq *s, struct tep_record *record,
				     struct tep_event *event, void *context)
{
	tep_print_num_field(s, "rip %llx ", event, "rip", record, 1);

	return kvm_nested_vmexit_inject_handler(s, record, event, context);
}

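/*
 * Local mirror of the kernel's kvm_mmu_page_role bit layout, used to
 * decode the raw "role" word.  The bitfields are only meaningful for
 * kernels using this layout; the decode is skipped anyway when the
 * trace file's endianness differs from the host's.
 */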
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned level:4;
		unsigned cr4_pae:1;
		unsigned quadrant:2;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned efer_nx:1;
		unsigned cr0_wp:1;
		unsigned smep_and_not_wp:1;
		unsigned smap_and_not_wp:1;
		unsigned pad_for_nice_hex_output:8;
		unsigned smm:8;
	};
};

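/*
 * Decode and print an MMU page role word, followed by the root count
 * and the sync/unsync state.  If the trace file's byte order differs
 * from the local one, the bitfields cannot be trusted, so the raw
 * word is printed instead.
 */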
static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
			      struct tep_event *event, void *context)
{
	unsigned long long val;
	static const char *access_str[] = {
		"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"
	};
	union kvm_mmu_page_role role;

	if (tep_get_field_val(s, event, "role", record, &val, 1) < 0)
		return -1;

	role.word = (int)val;

	/*
	 * We can only use the structure if the file is of the same
	 * endianness.
	 */
	if (tep_is_file_bigendian(event->tep) ==
	    tep_is_local_bigendian(event->tep)) {

		trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s",
				 role.level,
				 role.quadrant,
				 role.direct ? " direct" : "",
				 access_str[role.access],
				 role.invalid ? " invalid" : "",
				 role.cr4_pae ? "" : "!",
				 role.efer_nx ? "" : "!",
				 role.cr0_wp ? "" : "!",
				 role.smep_and_not_wp ? " smep" : "",
				 role.smap_and_not_wp ? " smap" : "",
				 role.smm ? " smm" : "");
	} else
		trace_seq_printf(s, "WORD: %08x", role.word);

	tep_print_num_field(s, " root %u ",  event,
			    "root_count", record, 1);

	if (tep_get_field_val(s, event, "unsync", record, &val, 1) < 0)
		return -1;

	trace_seq_printf(s, "%s%c",  val ? "unsync" : "sync", 0);
	return 0;
}

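/*
 * kvmmmu:kvm_mmu_get_page - print whether the shadow page was newly
 * created, its gfn, and then the decoded role.
 */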
static int kvm_mmu_get_page_handler(struct trace_seq *s,
				    struct tep_record *record,
				    struct tep_event *event, void *context)
{
	unsigned long long val;

	if (tep_get_field_val(s, event, "created", record, &val, 1) < 0)
		return -1;

	trace_seq_printf(s, "%s ", val ? "new" : "existing");

	if (tep_get_field_val(s, event, "gfn", record, &val, 1) < 0)
		return -1;

	trace_seq_printf(s, "sp gfn %llx ", val);
	return kvm_mmu_print_role(s, record, event, context);
}

#define PT_WRITABLE_SHIFT 1
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)

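/*
 * Helper registered for the is_writable_pte() call used in kvmmmu
 * print formats: report whether the writable bit is set in the PTE.
 */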
static unsigned long long
process_is_writable_pte(struct trace_seq *s, unsigned long long *args)
{
	unsigned long pte = args[0];
	return pte & PT_WRITABLE_MASK;
}

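/*
 * Plugin entry point: register the kvm and kvmmmu event handlers and
 * the is_writable_pte() print helper with the tep handle.
 */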
int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
	init_disassembler();

	tep_register_event_handler(tep, -1, "kvm", "kvm_exit",
				   kvm_exit_handler, NULL);

	tep_register_event_handler(tep, -1, "kvm", "kvm_emulate_insn",
				   kvm_emulate_insn_handler, NULL);

	tep_register_event_handler(tep, -1, "kvm", "kvm_nested_vmexit",
				   kvm_nested_vmexit_handler, NULL);

	tep_register_event_handler(tep, -1, "kvm", "kvm_nested_vmexit_inject",
				   kvm_nested_vmexit_inject_handler, NULL);

	tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_get_page",
				   kvm_mmu_get_page_handler, NULL);

	tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_sync_page",
				   kvm_mmu_print_role, NULL);

	tep_register_event_handler(tep, -1,
				   "kvmmmu", "kvm_mmu_unsync_page",
				   kvm_mmu_print_role, NULL);

	tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_zap_page",
				   kvm_mmu_print_role, NULL);

	tep_register_event_handler(tep, -1, "kvmmmu",
			"kvm_mmu_prepare_zap_page", kvm_mmu_print_role,
			NULL);

	tep_register_print_function(tep,
				    process_is_writable_pte,
				    TEP_FUNC_ARG_INT,
				    "is_writable_pte",
				    TEP_FUNC_ARG_LONG,
				    TEP_FUNC_ARG_VOID);
	return 0;
}

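/* Plugin exit point: undo everything registered in TEP_PLUGIN_LOADER(). */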
void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
	tep_unregister_event_handler(tep, -1, "kvm", "kvm_exit",
				     kvm_exit_handler, NULL);

	tep_unregister_event_handler(tep, -1, "kvm", "kvm_emulate_insn",
				     kvm_emulate_insn_handler, NULL);

	tep_unregister_event_handler(tep, -1, "kvm", "kvm_nested_vmexit",
				     kvm_nested_vmexit_handler, NULL);

	tep_unregister_event_handler(tep, -1, "kvm", "kvm_nested_vmexit_inject",
				     kvm_nested_vmexit_inject_handler, NULL);

	tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_get_page",
				     kvm_mmu_get_page_handler, NULL);

	tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_sync_page",
				     kvm_mmu_print_role, NULL);

	tep_unregister_event_handler(tep, -1,
				     "kvmmmu", "kvm_mmu_unsync_page",
				     kvm_mmu_print_role, NULL);

	tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_zap_page",
				     kvm_mmu_print_role, NULL);

	tep_unregister_event_handler(tep, -1, "kvmmmu",
			"kvm_mmu_prepare_zap_page", kvm_mmu_print_role,
			NULL);

	tep_unregister_print_function(tep, process_is_writable_pte,
				      "is_writable_pte");
}