
Searched for refs:VCPU_ID (results 1 – 25 of 43), sorted by relevance


/Linux-v5.15/tools/testing/selftests/kvm/s390x/
resets.c
  16: #define VCPU_ID 3
  19: struct kvm_s390_irq buf[VCPU_ID + LOCAL_IRQS];
  68: vcpu_get_reg(vm, VCPU_ID, &reg);  in test_one_reg()
  79: irqs = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_GET_IRQ_STATE, &irq_state);  in assert_noirq()
  95: vcpu_regs_get(vm, VCPU_ID, &regs);  in assert_clear()
  98: vcpu_sregs_get(vm, VCPU_ID, &sregs);  in assert_clear()
  101: vcpu_fpu_get(vm, VCPU_ID, &fpu);  in assert_clear()
  136: vcpu_sregs_get(vm, VCPU_ID, &sregs);  in assert_initial()
  162: vcpu_fpu_get(vm, VCPU_ID, &fpu);  in assert_initial()
  207: vm = vm_create_default(VCPU_ID, 0, guest_code_initial);  in test_normal()
  [all …]
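
Note: every hit above follows the same selftest-framework convention: helpers address a vCPU as the pair (vm, VCPU_ID), where VCPU_ID is an arbitrary per-test constant, rather than through a vCPU object. A minimal sketch of that skeleton, assuming the headers under tools/testing/selftests/kvm/include (the guest body is hypothetical):

    #include "kvm_util.h"

    #define VCPU_ID 3      /* arbitrary vCPU id, as in resets.c */

    static void guest_code(void)
    {
            /* hypothetical guest body; real tests issue ucalls here */
    }

    int main(void)
    {
            struct kvm_vm *vm;
            struct kvm_regs regs;
            struct kvm_sregs sregs;

            /* Create a VM with a single vCPU (id VCPU_ID) running guest_code. */
            vm = vm_create_default(VCPU_ID, 0, guest_code);

            /* Every accessor names the vCPU by (vm, id). */
            vcpu_regs_get(vm, VCPU_ID, &regs);
            vcpu_sregs_get(vm, VCPU_ID, &sregs);

            kvm_vm_free(vm);
            return 0;
    }
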
sync_regs_test.c
  25: #define VCPU_ID 5
  95: vm = vm_create_default(VCPU_ID, 0, guest_code);  in main()
  97: run = vcpu_state(vm, VCPU_ID);  in main()
  101: rv = _vcpu_run(vm, VCPU_ID);  in main()
  105: vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;  in main()
  108: rv = _vcpu_run(vm, VCPU_ID);  in main()
  112: vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;  in main()
  116: rv = _vcpu_run(vm, VCPU_ID);  in main()
  120: vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;  in main()
  123: rv = _vcpu_run(vm, VCPU_ID);  in main()
  [all …]
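
Note: the toggling above is the KVM_CAP_SYNC_REGS interface: vcpu_state() returns the vCPU's mmap'd struct kvm_run, and the test clears its kvm_valid_regs/kvm_dirty_regs fields between runs. The leading underscore on _vcpu_run() marks the framework variant that returns the ioctl result instead of asserting on failure; the x86_64 sync_regs_test below drives the same sequence. A condensed sketch:

    #include "kvm_util.h"

    #define VCPU_ID 5

    static void guest_code(void) { /* hypothetical */ }

    int main(void)
    {
            struct kvm_vm *vm = vm_create_default(VCPU_ID, 0, guest_code);
            struct kvm_run *run = vcpu_state(vm, VCPU_ID);  /* mmap'd kvm_run */
            int rv;

            /* Request that no register state be synced on this run... */
            run->kvm_valid_regs = 0;
            rv = _vcpu_run(vm, VCPU_ID);    /* returns rc; caller inspects it */

            /* ...and that none be written back either. */
            run->kvm_dirty_regs = 0;
            rv = _vcpu_run(vm, VCPU_ID);

            kvm_vm_free(vm);
            return rv ? 1 : 0;
    }
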
memop.c
  16: #define VCPU_ID 1
  50: vm = vm_create_default(VCPU_ID, 0, guest_code);  in main()
  51: run = vcpu_state(vm, VCPU_ID);  in main()
  63: vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);  in main()
  66: vcpu_run(vm, VCPU_ID);  in main()
  81: vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);  in main()
  93: rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);  in main()
  103: rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);  in main()
  114: rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);  in main()
  124: rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);  in main()
  [all …]
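
Note: vcpu_ioctl() and _vcpu_ioctl() are thin wrappers that issue an arbitrary ioctl on the vCPU fd; memop.c uses them for KVM_S390_MEM_OP. A sketch of populating the UAPI struct kvm_s390_mem_op (field names per linux/kvm.h; the target address and the zero-size failure case are illustrative assumptions):

    #include <stdint.h>
    #include "kvm_util.h"
    #include "test_util.h"

    #define VCPU_ID     1
    #define GUEST_GADDR 0x1000ul    /* hypothetical guest logical address */

    static uint8_t mem[4096];

    static void write_guest_mem(struct kvm_vm *vm)
    {
            struct kvm_s390_mem_op ksmo = {
                    .gaddr = GUEST_GADDR,
                    .size  = sizeof(mem),
                    .op    = KVM_S390_MEMOP_LOGICAL_WRITE,
                    .buf   = (uintptr_t)mem,
                    .ar    = 0,             /* access register 0 */
            };
            int rv;

            /* Asserting variant: the test aborts if the ioctl fails. */
            vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);

            /* '_' variant returns the raw rc so error paths can be probed. */
            ksmo.size = 0;                  /* deliberately invalid (assumed) */
            rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
            TEST_ASSERT(rv == -1, "zero-size mem op should fail");
    }
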
/Linux-v5.15/tools/testing/selftests/kvm/x86_64/
sync_regs_test.c
  23: #define VCPU_ID 5
  108: vm = vm_create_default(VCPU_ID, 0, guest_code);  in main()
  110: run = vcpu_state(vm, VCPU_ID);  in main()
  114: rv = _vcpu_run(vm, VCPU_ID);  in main()
  118: vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;  in main()
  121: rv = _vcpu_run(vm, VCPU_ID);  in main()
  125: vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;  in main()
  129: rv = _vcpu_run(vm, VCPU_ID);  in main()
  133: vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;  in main()
  136: rv = _vcpu_run(vm, VCPU_ID);  in main()
  [all …]
evmcs_test.c
  20: #define VCPU_ID 5
  107: vcpu_events_get(vm, VCPU_ID, &events);  in inject_nmi()
  112: vcpu_events_set(vm, VCPU_ID, &events);  in inject_nmi()
  120: state = vcpu_save_state(vm, VCPU_ID);  in save_restore_vm()
  122: vcpu_regs_get(vm, VCPU_ID, &regs1);  in save_restore_vm()
  128: vm_vcpu_add(vm, VCPU_ID);  in save_restore_vm()
  129: vcpu_set_hv_cpuid(vm, VCPU_ID);  in save_restore_vm()
  130: vcpu_enable_evmcs(vm, VCPU_ID);  in save_restore_vm()
  131: vcpu_load_state(vm, VCPU_ID, state);  in save_restore_vm()
  135: vcpu_regs_get(vm, VCPU_ID, &regs2);  in save_restore_vm()
  [all …]
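
Note: hits 120–135 outline the framework's checkpoint/restore idiom: snapshot the vCPU, tear the VM down, re-add a vCPU with the same id, and replay the snapshot (evmcs_test re-enables Hyper-V CPUID and eVMCS before loading, hits 129–130). A sketch; the kvm_vm_release()/kvm_vm_restart() calls between the two halves are elided from the hits and assumed here from the sibling state_test.c:

    #include <fcntl.h>
    #include <string.h>
    #include "kvm_util.h"
    #include "processor.h"
    #include "test_util.h"

    #define VCPU_ID 5

    static void save_restore_vm(struct kvm_vm *vm)
    {
            struct kvm_x86_state *state;
            struct kvm_regs regs1, regs2;

            state = vcpu_save_state(vm, VCPU_ID);   /* full vCPU snapshot */
            vcpu_regs_get(vm, VCPU_ID, &regs1);     /* GPRs for comparison */

            kvm_vm_release(vm);                     /* assumed: drop the fds */
            kvm_vm_restart(vm, O_RDWR);             /* assumed: recreate the VM */

            vm_vcpu_add(vm, VCPU_ID);               /* fresh vCPU, same id */
            vcpu_load_state(vm, VCPU_ID, state);    /* replay the snapshot */

            vcpu_regs_get(vm, VCPU_ID, &regs2);
            TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs1)),
                        "registers changed across save/restore");
    }
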
smm_test.c
  22: #define VCPU_ID 1
  126: vcpu_events_get(vm, VCPU_ID, &events);  in inject_smi()
  131: vcpu_events_set(vm, VCPU_ID, &events);  in inject_smi()
  145: vm = vm_create_default(VCPU_ID, 0, guest_code);  in main()
  147: run = vcpu_state(vm, VCPU_ID);  in main()
  158: vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);  in main()
  170: vcpu_args_set(vm, VCPU_ID, 1, nested_gva);  in main()
  173: _vcpu_run(vm, VCPU_ID);  in main()
  180: vcpu_regs_get(vm, VCPU_ID, &regs);  in main()
  208: state = vcpu_save_state(vm, VCPU_ID);  in main()
  [all …]
platform_info_test.c
  24: #define VCPU_ID 0
  50: struct kvm_run *run = vcpu_state(vm, VCPU_ID);  in test_msr_platform_info_enabled()
  54: vcpu_run(vm, VCPU_ID);  in test_msr_platform_info_enabled()
  59: get_ucall(vm, VCPU_ID, &uc);  in test_msr_platform_info_enabled()
  70: struct kvm_run *run = vcpu_state(vm, VCPU_ID);  in test_msr_platform_info_disabled()
  73: vcpu_run(vm, VCPU_ID);  in test_msr_platform_info_disabled()
  95: vm = vm_create_default(VCPU_ID, 0, guest_code);  in main()
  97: msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);  in main()
  98: vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,  in main()
  102: vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);  in main()
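
Note: hits 97–102 are the save/modify/restore discipline for host-controlled MSRs: read the original value, test against a modified one, then write the original back so later tests see clean state. A sketch (the modified bit is illustrative only):

    #include <stdint.h>
    #include "kvm_util.h"
    #include "processor.h"

    #define VCPU_ID 0

    static void toggle_platform_info(struct kvm_vm *vm)
    {
            uint64_t saved;

            /* Remember the original value... */
            saved = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);

            /* ...exercise the test with a modified one (illustrative bit)... */
            vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, saved | (1ull << 28));

            /* ...and restore it so subsequent tests start clean. */
            vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, saved);
    }
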
emulator_error_test.c
  14: #define VCPU_ID 1
  35: rc = _vcpu_run(vm, VCPU_ID);  in run_guest()
  62: struct kvm_run *run = vcpu_state(vm, VCPU_ID);  in process_exit_on_emulation_error()
  96: vcpu_regs_get(vm, VCPU_ID, &regs);  in process_exit_on_emulation_error()
  98: vcpu_regs_set(vm, VCPU_ID, &regs);  in process_exit_on_emulation_error()
  111: struct kvm_run *run = vcpu_state(vm, VCPU_ID);  in check_for_guest_assert()
  115: get_ucall(vm, VCPU_ID, &uc) == UCALL_ABORT) {  in check_for_guest_assert()
  122: struct kvm_run *run = vcpu_state(vm, VCPU_ID);  in process_ucall_done()
  132: TEST_ASSERT(get_ucall(vm, VCPU_ID, &uc) == UCALL_DONE,  in process_ucall_done()
  139: struct kvm_run *run = vcpu_state(vm, VCPU_ID);  in process_ucall()
  [all …]
vmx_pmu_msrs_test.c
  21: #define VCPU_ID 0
  73: vm = vm_create_default(VCPU_ID, 0, guest_code);  in main()
  92: vcpu_set_cpuid(vm, VCPU_ID, cpuid);  in main()
  96: ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES);  in main()
  99: vcpu_run(vm, VCPU_ID);  in main()
  100: ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES);  in main()
  104: ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), 0);  in main()
  107: ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format);  in main()
  115: vcpu_set_cpuid(vm, VCPU_ID, cpuid);  in main()
  123: vcpu_set_cpuid(vm, VCPU_ID, cpuid);  in main()
  [all …]
vmx_preemption_timer_test.c
  25: #define VCPU_ID 5
  178: vm = vm_create_default(VCPU_ID, 0, guest_code);  in main()
  179: run = vcpu_state(vm, VCPU_ID);  in main()
  181: vcpu_regs_get(vm, VCPU_ID, &regs1);  in main()
  184: vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);  in main()
  187: _vcpu_run(vm, VCPU_ID);  in main()
  193: switch (get_ucall(vm, VCPU_ID, &uc)) {  in main()
  235: state = vcpu_save_state(vm, VCPU_ID);  in main()
  237: vcpu_regs_get(vm, VCPU_ID, &regs1);  in main()
  243: vm_vcpu_add(vm, VCPU_ID);  in main()
  [all …]
state_test.c
  23: #define VCPU_ID 5
  167: vm = vm_create_default(VCPU_ID, 0, guest_code);  in main()
  168: run = vcpu_state(vm, VCPU_ID);  in main()
  170: vcpu_regs_get(vm, VCPU_ID, &regs1);  in main()
  182: vcpu_args_set(vm, VCPU_ID, 1, nested_gva);  in main()
  185: _vcpu_run(vm, VCPU_ID);  in main()
  191: switch (get_ucall(vm, VCPU_ID, &uc)) {  in main()
  209: state = vcpu_save_state(vm, VCPU_ID);  in main()
  211: vcpu_regs_get(vm, VCPU_ID, &regs1);  in main()
  217: vm_vcpu_add(vm, VCPU_ID);  in main()
  [all …]
set_sregs_test.c
  25: #define VCPU_ID 5
  40: rc = _vcpu_sregs_set(vm, VCPU_ID, &sregs);  in test_cr4_feature_bit()
  44: vcpu_sregs_get(vm, VCPU_ID, &sregs);  in test_cr4_feature_bit()
  99: vm_vcpu_add(vm, VCPU_ID);  in main()
  101: vcpu_sregs_get(vm, VCPU_ID, &sregs);  in main()
  106: rc = _vcpu_sregs_set(vm, VCPU_ID, &sregs);  in main()
  109: vcpu_sregs_get(vm, VCPU_ID, &sregs);  in main()
  127: vm = vm_create_default(VCPU_ID, 0, NULL);  in main()
  129: vcpu_sregs_get(vm, VCPU_ID, &sregs);  in main()
  131: rc = _vcpu_sregs_set(vm, VCPU_ID, &sregs);  in main()
  [all …]
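
Note: test_cr4_feature_bit() is a compact example of negative testing with the '_' helpers: set an unsupported CR4 bit, expect _vcpu_sregs_set() to return an error, and verify the old value survived. A sketch (the reserved bit chosen is illustrative):

    #include <stdint.h>
    #include "kvm_util.h"
    #include "test_util.h"

    #define VCPU_ID 5

    static void test_bad_cr4_bit(struct kvm_vm *vm)
    {
            struct kvm_sregs sregs;
            uint64_t orig_cr4;
            int rc;

            vcpu_sregs_get(vm, VCPU_ID, &sregs);
            orig_cr4 = sregs.cr4;

            sregs.cr4 |= 1ull << 62;        /* illustrative reserved bit */
            rc = _vcpu_sregs_set(vm, VCPU_ID, &sregs);
            TEST_ASSERT(rc, "KVM_SET_SREGS should reject a reserved CR4 bit");

            vcpu_sregs_get(vm, VCPU_ID, &sregs);
            TEST_ASSERT(sregs.cr4 == orig_cr4,
                        "CR4 changed despite the failed set");
    }
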
debug_regs.c
  13: #define VCPU_ID 0
  70: #define APPLY_DEBUG() vcpu_set_guest_debug(vm, VCPU_ID, &debug)
  73: vcpu_regs_get(vm, VCPU_ID, &regs); \
  75: vcpu_regs_set(vm, VCPU_ID, &regs); \
  104: vm = vm_create_default(VCPU_ID, 0, guest_code);  in main()
  105: run = vcpu_state(vm, VCPU_ID);  in main()
  111: vcpu_run(vm, VCPU_ID);  in main()
  127: vcpu_run(vm, VCPU_ID);  in main()
  150: vcpu_run(vm, VCPU_ID);  in main()
  170: vcpu_regs_get(vm, VCPU_ID, &regs);  in main()
  [all …]
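
Note: APPLY_DEBUG() at hit 70 wraps vcpu_set_guest_debug(), the front end for KVM_SET_GUEST_DEBUG. A sketch of arming a single-step trap with the UAPI struct kvm_guest_debug (flag names per linux/kvm.h):

    #include <string.h>
    #include "kvm_util.h"

    #define VCPU_ID 0

    static void single_step_once(struct kvm_vm *vm)
    {
            struct kvm_guest_debug debug;

            memset(&debug, 0, sizeof(debug));
            debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

            vcpu_set_guest_debug(vm, VCPU_ID, &debug);  /* arm the trap */
            vcpu_run(vm, VCPU_ID);  /* next entry exits with KVM_EXIT_DEBUG */
    }
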
mmu_role_test.c
  6: #define VCPU_ID 1
  34: vm = vm_create_default(VCPU_ID, 0, guest_code);  in mmu_role_test()
  35: run = vcpu_state(vm, VCPU_ID);  in mmu_role_test()
  40: r = _vcpu_run(vm, VCPU_ID);  in mmu_role_test()
  60: vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());  in mmu_role_test()
  73: vcpu_init_descriptor_tables(vm, VCPU_ID);  in mmu_role_test()
  76: r = _vcpu_run(vm, VCPU_ID);  in mmu_role_test()
  79: cmd = get_ucall(vm, VCPU_ID, NULL);  in mmu_role_test()
cr4_cpuid_sync_test.c
  24: #define VCPU_ID 1
  83: vm = vm_create_default(VCPU_ID, 0, guest_code);  in main()
  84: run = vcpu_state(vm, VCPU_ID);  in main()
  87: rc = _vcpu_run(vm, VCPU_ID);  in main()
  95: switch (get_ucall(vm, VCPU_ID, &uc)) {  in main()
  98: vcpu_sregs_get(vm, VCPU_ID, &sregs);  in main()
  100: vcpu_sregs_set(vm, VCPU_ID, &sregs);  in main()
vmx_close_while_nested_test.c
  21: #define VCPU_ID 5
  59: vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);  in main()
  63: vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);  in main()
  66: volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);  in main()
  69: vcpu_run(vm, VCPU_ID);  in main()
  78: switch (get_ucall(vm, VCPU_ID, &uc)) {  in main()
svm_vmcall_test.c
  15: #define VCPU_ID 5
  46: vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);  in main()
  49: vcpu_args_set(vm, VCPU_ID, 1, svm_gva);  in main()
  52: volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);  in main()
  55: vcpu_run(vm, VCPU_ID);  in main()
  61: switch (get_ucall(vm, VCPU_ID, &uc)) {  in main()
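
Note: svm_vmcall_test is the smallest instance of the loop nearly every x86_64 hit above drives: vcpu_args_set() marshals arguments into the guest's registers, then the host runs the vCPU and dispatches on get_ucall() until the guest reports UCALL_DONE. A sketch of that loop (the guest body and argument are hypothetical; the ucall constants are the framework's):

    #include "kvm_util.h"
    #include "test_util.h"

    #define VCPU_ID 5

    static void l1_guest_code(uint64_t arg)
    {
            GUEST_SYNC(arg);        /* report a checkpoint to the host */
            GUEST_DONE();           /* then finish */
    }

    int main(void)
    {
            struct kvm_vm *vm;
            struct ucall uc;

            vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
            vcpu_args_set(vm, VCPU_ID, 1, 42ul);    /* hypothetical guest arg */

            for (;;) {
                    vcpu_run(vm, VCPU_ID);
                    switch (get_ucall(vm, VCPU_ID, &uc)) {
                    case UCALL_SYNC:        /* guest checkpoint; keep going */
                            continue;
                    case UCALL_ABORT:       /* guest-side assertion failed */
                            TEST_FAIL("guest abort: %s", (const char *)uc.args[0]);
                    case UCALL_DONE:        /* clean finish */
                            kvm_vm_free(vm);
                            return 0;
                    default:
                            TEST_FAIL("unexpected ucall");
                    }
            }
    }
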
svm_int_ctl_test.c
  16: #define VCPU_ID 0
  95: vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);  in main()
  98: vcpu_init_descriptor_tables(vm, VCPU_ID);  in main()
  104: vcpu_args_set(vm, VCPU_ID, 1, svm_gva);  in main()
  106: struct kvm_run *run = vcpu_state(vm, VCPU_ID);  in main()
  109: vcpu_run(vm, VCPU_ID);  in main()
  115: switch (get_ucall(vm, VCPU_ID, &uc)) {  in main()
xen_shinfo_test.c
  18: #define VCPU_ID 5
  128: vm = vm_create_default(VCPU_ID, 0, (void *) guest_code);  in main()
  129: vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());  in main()
  158: vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &vi);  in main()
  164: vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &pvclock);  in main()
  171: vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &st);  in main()
  178: volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);  in main()
  181: vcpu_run(vm, VCPU_ID);  in main()
  188: switch (get_ucall(vm, VCPU_ID, &uc)) {  in main()
  208: vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &rst);  in main()
  [all …]
hyperv_features.c
  16: #define VCPU_ID 0
  165: vcpu_set_cpuid(vm, VCPU_ID, cpuid);  in hv_set_cpuid()
  185: run = vcpu_state(vm, VCPU_ID);  in guest_test_msrs_access()
  318: vcpu_enable_cap(vm, VCPU_ID, &cap);  in guest_test_msrs_access()
  456: r = _vcpu_run(vm, VCPU_ID);  in guest_test_msrs_access()
  462: switch (get_ucall(vm, VCPU_ID, &uc)) {  in guest_test_msrs_access()
  497: run = vcpu_state(vm, VCPU_ID);  in guest_test_hcalls_access()
  601: r = _vcpu_run(vm, VCPU_ID);  in guest_test_hcalls_access()
  607: switch (get_ucall(vm, VCPU_ID, &uc)) {  in guest_test_hcalls_access()
  636: vm = vm_create_default(VCPU_ID, 0, guest_msr);  in main()
  [all …]
xss_msr_test.c
  15: #define VCPU_ID 1
  47: vm = vm_create_default(VCPU_ID, 0, 0);  in main()
  58: xss_val = vcpu_get_msr(vm, VCPU_ID, MSR_IA32_XSS);  in main()
  62: vcpu_set_msr(vm, VCPU_ID, MSR_IA32_XSS, xss_val);  in main()
  70: r = _vcpu_set_msr(vm, VCPU_ID, MSR_IA32_XSS, 1ull << i);  in main()
userspace_msr_exit_test.c
  20: #define VCPU_ID 1
  402: rc = _vcpu_run(vm, VCPU_ID);  in run_guest()
  408: struct kvm_run *run = vcpu_state(vm, VCPU_ID);  in check_for_guest_assert()
  412: get_ucall(vm, VCPU_ID, &uc) == UCALL_ABORT) {  in check_for_guest_assert()
  420: struct kvm_run *run = vcpu_state(vm, VCPU_ID);  in process_rdmsr()
  455: struct kvm_run *run = vcpu_state(vm, VCPU_ID);  in process_wrmsr()
  486: struct kvm_run *run = vcpu_state(vm, VCPU_ID);  in process_ucall_done()
  496: TEST_ASSERT(get_ucall(vm, VCPU_ID, &uc) == UCALL_DONE,  in process_ucall_done()
  503: struct kvm_run *run = vcpu_state(vm, VCPU_ID);  in process_ucall()
  513: switch (get_ucall(vm, VCPU_ID, &uc)) {  in process_ucall()
  [all …]
kvm_pv_test.c
  174: #define VCPU_ID 0
  182: run = vcpu_state(vm, VCPU_ID);  in enter_guest()
  185: r = _vcpu_run(vm, VCPU_ID);  in enter_guest()
  191: switch (get_ucall(vm, VCPU_ID, &uc)) {  in enter_guest()
  218: vm = vm_create_default(VCPU_ID, 0, guest_main);  in main()
  222: vcpu_enable_cap(vm, VCPU_ID, &cap);  in main()
  226: vcpu_set_cpuid(vm, VCPU_ID, best);  in main()
  229: vcpu_init_descriptor_tables(vm, VCPU_ID);  in main()
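
Note: vcpu_enable_cap() at hit 222 is the per-vCPU KVM_ENABLE_CAP wrapper; in kvm_pv_test the capability involved is KVM_CAP_ENFORCE_PV_FEATURE_CPUID, which makes KVM fault PV features that the vCPU's CPUID does not advertise. A sketch of the call shape (args usage per the UAPI struct kvm_enable_cap):

    #include "kvm_util.h"

    #define VCPU_ID 0

    static void enforce_pv_cpuid(struct kvm_vm *vm)
    {
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_ENFORCE_PV_FEATURE_CPUID,
                    .args[0] = 1,           /* 1 == enable enforcement */
            };

            vcpu_enable_cap(vm, VCPU_ID, &cap);
    }
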
vmx_nested_tsc_scaling_test.c
  19: #define VCPU_ID 0
  185: vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);  in main()
  187: vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);  in main()
  189: tsc_khz = _vcpu_ioctl(vm, VCPU_ID, KVM_GET_TSC_KHZ, NULL);  in main()
  193: vcpu_ioctl(vm, VCPU_ID, KVM_SET_TSC_KHZ,  in main()
  197: volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);  in main()
  200: vcpu_run(vm, VCPU_ID);  in main()
  206: switch (get_ucall(vm, VCPU_ID, &uc)) {  in main()
hyperv_clock.c
  174: #define VCPU_ID 0
  181: tsc_freq = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TSC_FREQUENCY);  in host_check_tsc_msr_rdtsc()
  186: t1 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);  in host_check_tsc_msr_rdtsc()
  189: t2 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);  in host_check_tsc_msr_rdtsc()
  212: vm = vm_create_default(VCPU_ID, 0, guest_main);  in main()
  213: run = vcpu_state(vm, VCPU_ID);  in main()
  215: vcpu_set_hv_cpuid(vm, VCPU_ID);  in main()
  221: vcpu_args_set(vm, VCPU_ID, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));  in main()
  226: _vcpu_run(vm, VCPU_ID);  in main()
  232: switch (get_ucall(vm, VCPU_ID, &uc)) {  in main()
