Lines Matching full:best

78 struct kvm_cpuid_entry2 *best; in kvm_check_cpuid() local
84 best = cpuid_entry2_find(entries, nent, 0x80000008, 0); in kvm_check_cpuid()
85 if (best) { in kvm_check_cpuid()
86 int vaddr_bits = (best->eax & 0xff00) >> 8; in kvm_check_cpuid()
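
A minimal sketch of the surrounding context in kvm_check_cpuid(), reconstructed from the matches above. cpuid_entry2_find() is the KVM-internal lookup shown in the match; the accepted virtual-address widths (0, 48 or 57 bits) and the -EINVAL return are assumptions, since those lines are not in the match list.

	/* Sketch: reject CPUID tables advertising an unexpected vaddr width. */
	static int kvm_check_cpuid(struct kvm_cpuid_entry2 *entries, int nent)
	{
		struct kvm_cpuid_entry2 *best;

		/* CPUID.0x80000008:EAX[15:8] is the guest virtual address width. */
		best = cpuid_entry2_find(entries, nent, 0x80000008, 0);
		if (best) {
			int vaddr_bits = (best->eax & 0xff00) >> 8;

			/* Assumption: only 0/48/57 bits pass the canonical checks. */
			if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
				return -EINVAL;
		}

		return 0;
	}
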
97 struct kvm_cpuid_entry2 *best; in kvm_update_pv_runtime() local
99 best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0); in kvm_update_pv_runtime()
105 if (best) in kvm_update_pv_runtime()
106 vcpu->arch.pv_cpuid.features = best->eax; in kvm_update_pv_runtime()
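
Joining the kvm_update_pv_runtime() matches into one unit (the signature, braces and comment are inferred): the KVM_CPUID_FEATURES leaf's EAX is cached in vcpu->arch.pv_cpuid.features so paravirt paths can test feature bits without walking the CPUID array each time.

	void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
	{
		struct kvm_cpuid_entry2 *best;

		best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);

		/* Cache the PV feature bitmap to avoid per-operation lookups. */
		if (best)
			vcpu->arch.pv_cpuid.features = best->eax;
	}
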
111 struct kvm_cpuid_entry2 *best; in kvm_update_cpuid_runtime() local
113 best = kvm_find_cpuid_entry(vcpu, 1, 0); in kvm_update_cpuid_runtime()
114 if (best) { in kvm_update_cpuid_runtime()
117 cpuid_entry_change(best, X86_FEATURE_OSXSAVE, in kvm_update_cpuid_runtime()
120 cpuid_entry_change(best, X86_FEATURE_APIC, in kvm_update_cpuid_runtime()
124 best = kvm_find_cpuid_entry(vcpu, 7, 0); in kvm_update_cpuid_runtime()
125 if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) in kvm_update_cpuid_runtime()
126 cpuid_entry_change(best, X86_FEATURE_OSPKE, in kvm_update_cpuid_runtime()
129 best = kvm_find_cpuid_entry(vcpu, 0xD, 0); in kvm_update_cpuid_runtime()
130 if (best) in kvm_update_cpuid_runtime()
131 best->ebx = xstate_required_size(vcpu->arch.xcr0, false); in kvm_update_cpuid_runtime()
133 best = kvm_find_cpuid_entry(vcpu, 0xD, 1); in kvm_update_cpuid_runtime()
134 if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) || in kvm_update_cpuid_runtime()
135 cpuid_entry_has(best, X86_FEATURE_XSAVEC))) in kvm_update_cpuid_runtime()
136 best->ebx = xstate_required_size(vcpu->arch.xcr0, true); in kvm_update_cpuid_runtime()
138 best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0); in kvm_update_cpuid_runtime()
139 if (kvm_hlt_in_guest(vcpu->kvm) && best && in kvm_update_cpuid_runtime()
140 (best->eax & (1 << KVM_FEATURE_PV_UNHALT))) in kvm_update_cpuid_runtime()
141 best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT); in kvm_update_cpuid_runtime()
144 best = kvm_find_cpuid_entry(vcpu, 0x1, 0); in kvm_update_cpuid_runtime()
145 if (best) in kvm_update_cpuid_runtime()
146 cpuid_entry_change(best, X86_FEATURE_MWAIT, in kvm_update_cpuid_runtime()
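
A sketch tying the kvm_update_cpuid_runtime() matches together: each lookup patches a runtime-dependent CPUID bit (OSXSAVE, APIC enable, OSPKE, XSAVE area sizes, PV_UNHALT, MWAIT). Helpers such as cpuid_entry_change(), cpuid_entry_has() and xstate_required_size() are KVM-internal and appear in the matches; gating the OSXSAVE update on boot_cpu_has(X86_FEATURE_XSAVE) and the MWAIT update on the KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT quirk are assumptions, since those conditions are not among the matched lines.

	void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
	{
		struct kvm_cpuid_entry2 *best;

		best = kvm_find_cpuid_entry(vcpu, 1, 0);
		if (best) {
			/* OSXSAVE and APIC-enable mirror guest CR4/APIC_BASE state. */
			if (boot_cpu_has(X86_FEATURE_XSAVE))
				cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
						   kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));
			cpuid_entry_change(best, X86_FEATURE_APIC,
					   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
		}

		/* OSPKE follows guest CR4.PKE when the host supports PKU. */
		best = kvm_find_cpuid_entry(vcpu, 7, 0);
		if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
			cpuid_entry_change(best, X86_FEATURE_OSPKE,
					   kvm_read_cr4_bits(vcpu, X86_CR4_PKE));

		/* Leaf 0xD reports the XSAVE area size for the current XCR0. */
		best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
		if (best)
			best->ebx = xstate_required_size(vcpu->arch.xcr0, false);

		best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
		if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
			     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
			best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

		/* Hide PV_UNHALT when HLT is not intercepted. */
		best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
		if (kvm_hlt_in_guest(vcpu->kvm) && best &&
		    (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
			best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

		/* Assumption: MWAIT tracking is quirk-gated; the gate is not matched. */
		if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
			best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
			if (best)
				cpuid_entry_change(best, X86_FEATURE_MWAIT,
						   vcpu->arch.ia32_misc_enable_msr &
						   MSR_IA32_MISC_ENABLE_MWAIT);
		}
	}
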
156 struct kvm_cpuid_entry2 *best; in kvm_vcpu_after_set_cpuid() local
158 best = kvm_find_cpuid_entry(vcpu, 1, 0); in kvm_vcpu_after_set_cpuid()
159 if (best && apic) { in kvm_vcpu_after_set_cpuid()
160 if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER)) in kvm_vcpu_after_set_cpuid()
168 best = kvm_find_cpuid_entry(vcpu, 0xD, 0); in kvm_vcpu_after_set_cpuid()
169 if (!best) in kvm_vcpu_after_set_cpuid()
173 (best->eax | ((u64)best->edx << 32)) & supported_xcr0; in kvm_vcpu_after_set_cpuid()
183 best = kvm_find_cpuid_entry(vcpu, 0x12, 0x1); in kvm_vcpu_after_set_cpuid()
184 if (best) { in kvm_vcpu_after_set_cpuid()
185 best->ecx &= vcpu->arch.guest_supported_xcr0 & 0xffffffff; in kvm_vcpu_after_set_cpuid()
186 best->edx &= vcpu->arch.guest_supported_xcr0 >> 32; in kvm_vcpu_after_set_cpuid()
187 best->ecx |= XFEATURE_MASK_FPSSE; in kvm_vcpu_after_set_cpuid()
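
Context sketch for the kvm_vcpu_after_set_cpuid() matches: leaf 1 drives the LAPIC timer mode mask, leaf 0xD yields guest_supported_xcr0, and leaf 0x12 sub-leaf 1 (the SGX enclave XFRM bits) is clamped to that XCR0 with FP/SSE forced on. The apic local, the concrete timer-mask values and the zero fallback when leaf 0xD is absent are assumptions, as those lines are not in the match list.

	static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
	{
		struct kvm_lapic *apic = vcpu->arch.apic;
		struct kvm_cpuid_entry2 *best;

		best = kvm_find_cpuid_entry(vcpu, 1, 0);
		if (best && apic) {
			/* Assumption: deadline timer widens the timer mode mask. */
			if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
				apic->lapic_timer.timer_mode_mask = 3 << 17;
			else
				apic->lapic_timer.timer_mode_mask = 1 << 17;

			kvm_apic_set_version(vcpu);
		}

		/* guest_supported_xcr0 = leaf 0xD EDX:EAX masked by host support. */
		best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
		if (!best)
			vcpu->arch.guest_supported_xcr0 = 0;
		else
			vcpu->arch.guest_supported_xcr0 =
				(best->eax | ((u64)best->edx << 32)) & supported_xcr0;

		/*
		 * Clamp the allowed SGX enclave XFRM (CPUID.0x12.0x1 EDX:ECX) to
		 * the guest's XCR0; FP and SSE are always permitted.
		 */
		best = kvm_find_cpuid_entry(vcpu, 0x12, 0x1);
		if (best) {
			best->ecx &= vcpu->arch.guest_supported_xcr0 & 0xffffffff;
			best->edx &= vcpu->arch.guest_supported_xcr0 >> 32;
			best->ecx |= XFEATURE_MASK_FPSSE;
		}
	}
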
213 struct kvm_cpuid_entry2 *best; in cpuid_query_maxphyaddr() local
215 best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0); in cpuid_query_maxphyaddr()
216 if (!best || best->eax < 0x80000008) in cpuid_query_maxphyaddr()
218 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); in cpuid_query_maxphyaddr()
219 if (best) in cpuid_query_maxphyaddr()
220 return best->eax & 0xff; in cpuid_query_maxphyaddr()
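
The cpuid_query_maxphyaddr() matches assemble into the following sketch: leaf 0x80000000 is probed to confirm that 0x80000008 exists, and that leaf's EAX[7:0] is taken as the guest MAXPHYADDR. The not_found label and the 36-bit fallback are assumptions, as they do not appear among the matched lines.

	static int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
	{
		struct kvm_cpuid_entry2 *best;

		best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
		if (!best || best->eax < 0x80000008)
			goto not_found;
		best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
		if (best)
			return best->eax & 0xff;
	not_found:
		/* Assumption: default to a 36-bit physical address width. */
		return 36;
	}
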