Lines matching +full:ecx +full:-2000 in arch/x86/kernel/cpu/common.c (Linux kernel)
1 // SPDX-License-Identifier: GPL-2.0-only
58 #include <asm/intel-family.h>
143 info = (struct ppin_info *)id->driver_data; in ppin_init()
145 if (rdmsrl_safe(info->msr_ppin_ctl, &val)) in ppin_init()
155 wrmsrl_safe(info->msr_ppin_ctl, val | 2UL); in ppin_init()
156 rdmsrl_safe(info->msr_ppin_ctl, &val); in ppin_init()
161 c->ppin = __rdmsr(info->msr_ppin); in ppin_init()
162 set_cpu_cap(c, info->feature); in ppin_init()
167 clear_cpu_cap(c, info->feature); in ppin_init()
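For illustration, a minimal user-space sketch of the same control-bit handshake, assuming Intel's MSR numbers (0x4e/0x4f per msr-index.h; AMD uses different ones) and the msr driver's /dev/cpu/N/msr interface. ppin_init() only reads PPIN once the enable bit (bit 1) is set, and only tries to set it when the lock bit (bit 0) is clear:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_PPIN_CTL 0x4e   /* Intel numbers assumed; AMD differs */
    #define MSR_PPIN     0x4f

    int main(void)
    {
        uint64_t ctl, ppin;
        int fd = open("/dev/cpu/0/msr", O_RDONLY); /* needs root + msr module */

        if (fd < 0 || pread(fd, &ctl, 8, MSR_PPIN_CTL) != 8)
            return 1;
        /* bit 0 = lock, bit 1 = enable: mirror of the "val | 2UL" logic above */
        if ((ctl & 2) && pread(fd, &ppin, 8, MSR_PPIN) == 8)
            printf("PPIN: %016llx\n", (unsigned long long)ppin);
        close(fd);
        return 0;
    }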
186 if (c->cpuid_level == -1) { in default_init()
188 if (c->x86 == 4) in default_init()
189 strcpy(c->x86_model_id, "486"); in default_init()
190 else if (c->x86 == 3) in default_init()
191 strcpy(c->x86_model_id, "386"); in default_init()
208 * IRET will check the segment types kkeil 2000/10/28
230 /* 32-bit code */
232 /* 16-bit code */
234 /* 16-bit data */
236 /* 16-bit data */
238 /* 16-bit data */
244 /* 32-bit code */
246 /* 16-bit code */
262 return -EINVAL; in x86_nopcid_setup()
279 return -EINVAL; in x86_noinvpcid_setup()
292 static int cachesize_override = -1;
354 c->cpuid_level = cpuid_eax(0); in squash_the_stupid_serial_number()
526 * Protection Keys are not available in 32-bit mode.
626 * software. Add those features to this table to auto-disable them.
645 for (df = cpuid_dependent_features; df->feature; df++) { in filter_cpuid_features()
647 if (!cpu_has(c, df->feature)) in filter_cpuid_features()
650 * Note: cpuid_level is set to -1 if unavailable, but in filter_cpuid_features()
656 if (!((s32)df->level < 0 ? in filter_cpuid_features()
657 (u32)df->level > (u32)c->extended_cpuid_level : in filter_cpuid_features()
658 (s32)df->level > (s32)c->cpuid_level)) in filter_cpuid_features()
661 clear_cpu_cap(c, df->feature); in filter_cpuid_features()
666 x86_cap_flag(df->feature), df->level); in filter_cpuid_features()
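The ternary on lines 656-658 is dense; a sketch of the same predicate with the kernel's s32/u32 types spelled out as standard ones (the helper name is mine, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Extended CPUID leaves (0x80000000 and up) are negative when viewed
     * as s32, so they compare (as u32) against extended_cpuid_level;
     * basic leaves compare against cpuid_level, which is -1 when CPUID
     * is unavailable. Returns true when the defining leaf is missing.
     */
    static bool cpuid_leaf_missing(int32_t level, int32_t cpuid_level,
                                   uint32_t extended_cpuid_level)
    {
        if (level < 0)
            return (uint32_t)level > extended_cpuid_level;
        return level > cpuid_level;
    }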
683 if (c->x86_model >= 16) in table_lookup_model()
689 info = this_cpu->legacy_models; in table_lookup_model()
691 while (info->family) { in table_lookup_model()
692 if (info->family == c->x86) in table_lookup_model()
693 return info->model_names[c->x86_model]; in table_lookup_model()
715 /* The 32-bit entry code needs to find cpu_entry_area. */
719 /* Load the original GDT from the per-cpu structure */
725 gdt_descr.size = GDT_SIZE - 1; in load_direct_gdt()
730 /* Load a fixmap remapping of the per-cpu GDT */
736 gdt_descr.size = GDT_SIZE - 1; in load_fixmap_gdt()
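The GDT_SIZE - 1 stores follow the x86 convention that a descriptor-table limit is the table's byte size minus one. A user-space sketch that reads the current GDTR with SGDT (readable outside ring 0 unless UMIP intervenes; struct name assumed from the kernel's desc_ptr):

    #include <stdint.h>
    #include <stdio.h>

    struct __attribute__((packed)) desc_ptr {
        uint16_t size;      /* limit = bytes - 1, hence GDT_SIZE - 1 above */
        uint64_t address;
    };

    int main(void)
    {
        struct desc_ptr gdtr;

        asm volatile("sgdt %0" : "=m" (gdtr));
        printf("GDT at %#llx, limit %#x (%u bytes)\n",
               (unsigned long long)gdtr.address, gdtr.size, gdtr.size + 1u);
        return 0;
    }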
742 * Current gdt points %fs at the "master" per-cpu area: after this,
749 /* Reload the per-cpu base */ in switch_to_new_gdt()
760 if (c->extended_cpuid_level < 0x80000004) in get_model_name()
763 v = (unsigned int *)c->x86_model_id; in get_model_name()
767 c->x86_model_id[48] = 0; in get_model_name()
770 p = q = s = &c->x86_model_id[0]; in get_model_name()
776 /* Note the last non-whitespace index */ in get_model_name()
788 unsigned int eax, ebx, ecx, edx; in detect_num_cpu_cores() local
790 c->x86_max_cores = 1; in detect_num_cpu_cores()
791 if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4) in detect_num_cpu_cores()
794 cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); in detect_num_cpu_cores()
796 c->x86_max_cores = (eax >> 26) + 1; in detect_num_cpu_cores()
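The same computation from user space, assuming GCC/Clang's <cpuid.h>; on Intel CPUs, leaf 4 EAX[31:26] reports the maximum core count per package minus one:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 4 is Intel's deterministic cache leaf; AMD returns zeros. */
        if (!__get_cpuid_count(4, 0, &eax, &ebx, &ecx, &edx) || !eax)
            return 1;
        /* EAX[31:26] = maximum cores per physical package, minus one */
        printf("max cores per package: %u\n", (eax >> 26) + 1);
        return 0;
    }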
801 unsigned int n, dummy, ebx, ecx, edx, l2size; in cpu_detect_cache_sizes() local
803 n = c->extended_cpuid_level; in cpu_detect_cache_sizes()
806 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); in cpu_detect_cache_sizes()
807 c->x86_cache_size = (ecx>>24) + (edx>>24); in cpu_detect_cache_sizes()
810 c->x86_tlbsize = 0; in cpu_detect_cache_sizes()
817 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); in cpu_detect_cache_sizes()
818 l2size = ecx >> 16; in cpu_detect_cache_sizes()
821 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); in cpu_detect_cache_sizes()
823 /* do processor-specific cache resizing */ in cpu_detect_cache_sizes()
824 if (this_cpu->legacy_cache_size) in cpu_detect_cache_sizes()
825 l2size = this_cpu->legacy_cache_size(c, l2size); in cpu_detect_cache_sizes()
828 if (cachesize_override != -1) in cpu_detect_cache_sizes()
835 c->x86_cache_size = l2size; in cpu_detect_cache_sizes()
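A user-space rendering of the two cache leaves read above (the AMD-style layout; Intel reports cache geometry through leaf 4 instead):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* 0x80000005: ECX[31:24] = L1D KB, EDX[31:24] = L1I KB */
        if (__get_cpuid(0x80000005, &eax, &ebx, &ecx, &edx))
            printf("L1: %u KB\n", (ecx >> 24) + (edx >> 24));
        /* 0x80000006: ECX[31:16] = unified L2 size in KB */
        if (__get_cpuid(0x80000006, &eax, &ebx, &ecx, &edx))
            printf("L2: %u KB\n", ecx >> 16);
        return 0;
    }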
848 if (this_cpu->c_detect_tlb) in cpu_detect_tlb()
849 this_cpu->c_detect_tlb(c); in cpu_detect_tlb()
863 u32 eax, ebx, ecx, edx; in detect_ht_early() local
866 return -1; in detect_ht_early()
869 return -1; in detect_ht_early()
872 return -1; in detect_ht_early()
874 cpuid(1, &eax, &ebx, &ecx, &edx); in detect_ht_early()
878 pr_info_once("CPU0: Hyper-Threading is disabled\n"); in detect_ht_early()
892 c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb); in detect_ht()
894 smp_num_siblings = smp_num_siblings / c->x86_max_cores; in detect_ht()
898 core_bits = get_count_order(c->x86_max_cores); in detect_ht()
900 c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) & in detect_ht()
901 ((1 << core_bits) - 1); in detect_ht()
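The sibling count consumed by detect_ht() comes from leaf 1; a minimal sketch (note EBX[23:16] is the maximum addressable logical-CPU count, not the enabled count):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 1;
        if (!(edx & (1u << 28))) {  /* HTT feature flag */
            puts("CPU0: Hyper-Threading is disabled");
            return 0;
        }
        printf("siblings: %u\n", (ebx >> 16) & 0xff);
        return 0;
    }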
907 char *v = c->x86_vendor_id; in get_cpu_vendor()
914 if (!strcmp(v, cpu_devs[i]->c_ident[0]) || in get_cpu_vendor()
915 (cpu_devs[i]->c_ident[1] && in get_cpu_vendor()
916 !strcmp(v, cpu_devs[i]->c_ident[1]))) { in get_cpu_vendor()
919 c->x86_vendor = this_cpu->c_x86_vendor; in get_cpu_vendor()
927 c->x86_vendor = X86_VENDOR_UNKNOWN; in get_cpu_vendor()
934 cpuid(0x00000000, (unsigned int *)&c->cpuid_level, in cpu_detect()
935 (unsigned int *)&c->x86_vendor_id[0], in cpu_detect()
936 (unsigned int *)&c->x86_vendor_id[8], in cpu_detect()
937 (unsigned int *)&c->x86_vendor_id[4]); in cpu_detect()
939 c->x86 = 4; in cpu_detect()
940 /* Intel-defined flags: level 0x00000001 */ in cpu_detect()
941 if (c->cpuid_level >= 0x00000001) { in cpu_detect()
945 c->x86 = x86_family(tfms); in cpu_detect()
946 c->x86_model = x86_model(tfms); in cpu_detect()
947 c->x86_stepping = x86_stepping(tfms); in cpu_detect()
950 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; in cpu_detect()
951 c->x86_cache_alignment = c->x86_clflush_size; in cpu_detect()
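A sketch of the same decode from user space. Leaf 0 stores the vendor string in EBX, EDX, ECX order (exactly why cpu_detect() fills x86_vendor_id[0/8/4] above), and the family/model math mirrors the kernel's x86_family()/x86_model() helpers:

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx, tfms, misc;
        char vendor[13] = { 0 };

        __get_cpuid(0, &eax, &ebx, &ecx, &edx);
        memcpy(vendor, &ebx, 4);
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);

        __get_cpuid(1, &tfms, &misc, &ecx, &edx);
        unsigned int family = (tfms >> 8) & 0xf;
        unsigned int model = (tfms >> 4) & 0xf;
        if (family == 0xf)
            family += (tfms >> 20) & 0xff;      /* extended family */
        if (family >= 0x6)
            model += ((tfms >> 16) & 0xf) << 4; /* extended model */
        /* EBX[15:8] = CLFLUSH line size in 8-byte units,
         * valid when the CLFSH flag (leaf 1 EDX bit 19) is set */
        printf("%s family 0x%x model 0x%x stepping 0x%x, clflush %u bytes\n",
               vendor, family, model, tfms & 0xf, ((misc >> 8) & 0xff) * 8);
        return 0;
    }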
961 c->x86_capability[i] &= ~cpu_caps_cleared[i]; in apply_forced_caps()
962 c->x86_capability[i] |= cpu_caps_set[i]; in apply_forced_caps()
972 * Intel CPUs, for finer-grained selection of what's available. in init_speculation_control()
1009 u32 eax, ebx, ecx, edx; in get_cpu_cap() local
1011 /* Intel-defined flags: level 0x00000001 */ in get_cpu_cap()
1012 if (c->cpuid_level >= 0x00000001) { in get_cpu_cap()
1013 cpuid(0x00000001, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1015 c->x86_capability[CPUID_1_ECX] = ecx; in get_cpu_cap()
1016 c->x86_capability[CPUID_1_EDX] = edx; in get_cpu_cap()
1020 if (c->cpuid_level >= 0x00000006) in get_cpu_cap()
1021 c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); in get_cpu_cap()
1023 /* Additional Intel-defined flags: level 0x00000007 */ in get_cpu_cap()
1024 if (c->cpuid_level >= 0x00000007) { in get_cpu_cap()
1025 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1026 c->x86_capability[CPUID_7_0_EBX] = ebx; in get_cpu_cap()
1027 c->x86_capability[CPUID_7_ECX] = ecx; in get_cpu_cap()
1028 c->x86_capability[CPUID_7_EDX] = edx; in get_cpu_cap()
1030 /* Check valid sub-leaf index before accessing it */ in get_cpu_cap()
1032 cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1033 c->x86_capability[CPUID_7_1_EAX] = eax; in get_cpu_cap()
1038 if (c->cpuid_level >= 0x0000000d) { in get_cpu_cap()
1039 cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1041 c->x86_capability[CPUID_D_1_EAX] = eax; in get_cpu_cap()
1044 /* AMD-defined flags: level 0x80000001 */ in get_cpu_cap()
1046 c->extended_cpuid_level = eax; in get_cpu_cap()
1050 cpuid(0x80000001, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1052 c->x86_capability[CPUID_8000_0001_ECX] = ecx; in get_cpu_cap()
1053 c->x86_capability[CPUID_8000_0001_EDX] = edx; in get_cpu_cap()
1057 if (c->extended_cpuid_level >= 0x80000007) { in get_cpu_cap()
1058 cpuid(0x80000007, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1060 c->x86_capability[CPUID_8000_0007_EBX] = ebx; in get_cpu_cap()
1061 c->x86_power = edx; in get_cpu_cap()
1064 if (c->extended_cpuid_level >= 0x80000008) { in get_cpu_cap()
1065 cpuid(0x80000008, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1066 c->x86_capability[CPUID_8000_0008_EBX] = ebx; in get_cpu_cap()
1069 if (c->extended_cpuid_level >= 0x8000000a) in get_cpu_cap()
1070 c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); in get_cpu_cap()
1072 if (c->extended_cpuid_level >= 0x8000001f) in get_cpu_cap()
1073 c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f); in get_cpu_cap()
1080 * This needs to happen each time we re-probe, which may happen in get_cpu_cap()
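Each register read here lands in one word of c->x86_capability[]; the same bits are directly visible from user space, e.g. AVX2 in CPUID.(7,0):EBX bit 5:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* sub-leaf 0 of leaf 7; the kernel stores EBX as CPUID_7_0_EBX */
        if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
            printf("AVX2: %s\n", (ebx & (1u << 5)) ? "yes" : "no");
        return 0;
    }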
1088 u32 eax, ebx, ecx, edx; in get_cpu_address_sizes() local
1090 if (c->extended_cpuid_level >= 0x80000008) { in get_cpu_address_sizes()
1091 cpuid(0x80000008, &eax, &ebx, &ecx, &edx); in get_cpu_address_sizes()
1093 c->x86_virt_bits = (eax >> 8) & 0xff; in get_cpu_address_sizes()
1094 c->x86_phys_bits = eax & 0xff; in get_cpu_address_sizes()
1098 c->x86_phys_bits = 36; in get_cpu_address_sizes()
1100 c->x86_cache_bits = c->x86_phys_bits; in get_cpu_address_sizes()
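A user-space equivalent of the address-size probe; when leaf 0x80000008 is absent the kernel falls back to the hard-coded values seen above (36 physical bits with PAE on 32-bit kernels):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* 0x80000008: EAX[7:0] = physical address bits, EAX[15:8] = virtual */
        if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
            printf("phys %u bits, virt %u bits\n",
                   eax & 0xff, (eax >> 8) & 0xff);
        else
            puts("leaf unavailable; kernel falls back to fixed widths");
        return 0;
    }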
1113 c->x86 = 4; in identify_cpu_without_cpuid()
1115 c->x86 = 3; in identify_cpu_without_cpuid()
1118 if (cpu_devs[i] && cpu_devs[i]->c_identify) { in identify_cpu_without_cpuid()
1119 c->x86_vendor_id[0] = 0; in identify_cpu_without_cpuid()
1120 cpu_devs[i]->c_identify(c); in identify_cpu_without_cpuid()
1121 if (c->x86_vendor_id[0]) { in identify_cpu_without_cpuid()
1192 * updated non-speculatively, and the issuing of %gs-relative memory
1201 /* AMD Family 0xf - 0x12 */
1207 /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
1278 return m && !!(m->driver_data & which); in cpu_matches()
1335 * - TSX is supported or in cpu_set_bug_bits()
1336 * - TSX_CTRL is present in cpu_set_bug_bits()
1408 * probing for it doesn't even work. Disable it completely on 32-bit
1410 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
1472 /* empty-string, i.e., ""-defined feature flags */ in cpu_parse_early_param()
1526 c->x86_clflush_size = 64; in early_identify_cpu()
1527 c->x86_phys_bits = 36; in early_identify_cpu()
1528 c->x86_virt_bits = 48; in early_identify_cpu()
1530 c->x86_clflush_size = 32; in early_identify_cpu()
1531 c->x86_phys_bits = 32; in early_identify_cpu()
1532 c->x86_virt_bits = 32; in early_identify_cpu()
1534 c->x86_cache_alignment = c->x86_clflush_size; in early_identify_cpu()
1536 memset(&c->x86_capability, 0, sizeof(c->x86_capability)); in early_identify_cpu()
1537 c->extended_cpuid_level = 0; in early_identify_cpu()
1551 if (this_cpu->c_early_init) in early_identify_cpu()
1552 this_cpu->c_early_init(c); in early_identify_cpu()
1554 c->cpu_index = 0; in early_identify_cpu()
1557 if (this_cpu->c_bsp_init) in early_identify_cpu()
1558 this_cpu->c_bsp_init(c); in early_identify_cpu()
1576 * that it can't be enabled in 32-bit mode. in early_identify_cpu()
1583 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not in early_identify_cpu()
1585 * false-positives at the later stage. in early_identify_cpu()
1588 * - 5-level paging is disabled compile-time; in early_identify_cpu()
1589 * - it's 32-bit kernel; in early_identify_cpu()
1590 * - machine doesn't support 5-level paging; in early_identify_cpu()
1591 * - user specified 'no5lvl' in kernel command line. in early_identify_cpu()
1621 if (!cpudev->c_ident[j]) in early_cpu_init()
1623 pr_info(" %s %s\n", cpudev->c_vendor, in early_cpu_init()
1624 cpudev->c_ident[j]); in early_cpu_init()
1642 * detect it directly instead of hard-coding the choice by in detect_null_seg_behavior()
1665 if (c->extended_cpuid_level >= 0x80000021 && in check_null_seg_clears_base()
1685 if ((c->x86 == 0x17 || c->x86 == 0x18) && in check_null_seg_clears_base()
1695 c->extended_cpuid_level = 0; in generic_identify()
1712 if (c->cpuid_level >= 0x00000001) { in generic_identify()
1713 c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; in generic_identify()
1716 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); in generic_identify()
1718 c->apicid = c->initial_apicid; in generic_identify()
1721 c->phys_proc_id = c->initial_apicid; in generic_identify()
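The initial APIC ID that seeds phys_proc_id is simply leaf 1 EBX[31:24]; a sketch (the value is for whichever CPU the thread happens to run on, so pin the thread first for a meaningful answer):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            printf("initial APIC ID: %u\n", (ebx >> 24) & 0xff);
        return 0;
    }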
1733 * NB: For the time being, only 32-bit kernels support in generic_identify()
1734 * X86_BUG_ESPFIX as such. 64-bit kernels directly choose in generic_identify()
1736 * non-paravirt system ever shows up that does *not* have the in generic_identify()
1753 apicid = apic->cpu_present_to_apicid(cpu); in validate_apic_and_package_id()
1755 if (apicid != c->apicid) { in validate_apic_and_package_id()
1757 cpu, apicid, c->initial_apicid); in validate_apic_and_package_id()
1759 BUG_ON(topology_update_package_map(c->phys_proc_id, cpu)); in validate_apic_and_package_id()
1760 BUG_ON(topology_update_die_map(c->cpu_die_id, cpu)); in validate_apic_and_package_id()
1762 c->logical_proc_id = 0; in validate_apic_and_package_id()
1773 c->loops_per_jiffy = loops_per_jiffy; in identify_cpu()
1774 c->x86_cache_size = 0; in identify_cpu()
1775 c->x86_vendor = X86_VENDOR_UNKNOWN; in identify_cpu()
1776 c->x86_model = c->x86_stepping = 0; /* So far unknown... */ in identify_cpu()
1777 c->x86_vendor_id[0] = '\0'; /* Unset */ in identify_cpu()
1778 c->x86_model_id[0] = '\0'; /* Unset */ in identify_cpu()
1779 c->x86_max_cores = 1; in identify_cpu()
1780 c->x86_coreid_bits = 0; in identify_cpu()
1781 c->cu_id = 0xff; in identify_cpu()
1783 c->x86_clflush_size = 64; in identify_cpu()
1784 c->x86_phys_bits = 36; in identify_cpu()
1785 c->x86_virt_bits = 48; in identify_cpu()
1787 c->cpuid_level = -1; /* CPUID not detected */ in identify_cpu()
1788 c->x86_clflush_size = 32; in identify_cpu()
1789 c->x86_phys_bits = 32; in identify_cpu()
1790 c->x86_virt_bits = 32; in identify_cpu()
1792 c->x86_cache_alignment = c->x86_clflush_size; in identify_cpu()
1793 memset(&c->x86_capability, 0, sizeof(c->x86_capability)); in identify_cpu()
1795 memset(&c->vmx_capability, 0, sizeof(c->vmx_capability)); in identify_cpu()
1800 if (this_cpu->c_identify) in identify_cpu()
1801 this_cpu->c_identify(c); in identify_cpu()
1807 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); in identify_cpu()
1811 * Vendor-specific initialization. In this section we in identify_cpu()
1817 * At the end of this section, c->x86_capability better in identify_cpu()
1820 if (this_cpu->c_init) in identify_cpu()
1821 this_cpu->c_init(c); in identify_cpu()
1838 * The vendor-specific functions might have changed features. in identify_cpu()
1846 if (!c->x86_model_id[0]) { in identify_cpu()
1850 strcpy(c->x86_model_id, p); in identify_cpu()
1853 sprintf(c->x86_model_id, "%02x/%02x", in identify_cpu()
1854 c->x86, c->x86_model); in identify_cpu()
1880 boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; in identify_cpu()
1884 c->x86_capability[i] |= boot_cpu_data.x86_capability[i]; in identify_cpu()
1901 * on 32-bit kernels:
1916 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field -- in enable_sep_cpu()
1920 tss->x86_tss.ss1 = __KERNEL_CS; in enable_sep_cpu()
1921 wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); in enable_sep_cpu()
1963 if (c->x86_vendor < X86_VENDOR_NUM) { in print_cpu_info()
1964 vendor = this_cpu->c_vendor; in print_cpu_info()
1966 if (c->cpuid_level >= 0) in print_cpu_info()
1967 vendor = c->x86_vendor_id; in print_cpu_info()
1970 if (vendor && !strstr(c->x86_model_id, vendor)) in print_cpu_info()
1973 if (c->x86_model_id[0]) in print_cpu_info()
1974 pr_cont("%s", c->x86_model_id); in print_cpu_info()
1976 pr_cont("%d86", c->x86); in print_cpu_info()
1978 pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); in print_cpu_info()
1980 if (c->x86_stepping || c->cpuid_level >= 0) in print_cpu_info()
1981 pr_cont(", stepping: 0x%x)\n", c->x86_stepping); in print_cpu_info()
2020 * Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR in wrmsrl_cstar()
2038 * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. in syscall_init()
2040 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). in syscall_init()
2055 * to minimize user space-kernel interference. in syscall_init()
2148 d.d = 1; /* 32-bit */ in setup_getcpu()
2161 /* Set up the per-CPU TSS IST stacks */ in tss_setup_ist()
2162 tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF); in tss_setup_ist()
2163 tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI); in tss_setup_ist()
2164 tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB); in tss_setup_ist()
2165 tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE); in tss_setup_ist()
2166 /* Only mapped when SEV-ES is active */ in tss_setup_ist()
2167 tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC); in tss_setup_ist()
2185 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID; in tss_setup_io_bitmap()
2188 tss->io_bitmap.prev_max = 0; in tss_setup_io_bitmap()
2189 tss->io_bitmap.prev_sequence = 0; in tss_setup_io_bitmap()
2190 memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap)); in tss_setup_io_bitmap()
2195 tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL; in tss_setup_io_bitmap()
2214 set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); in cpu_init_exception_handling()
2226 * cpu_init() initializes state that is per-CPU. Some data is already
2252 * Initialize the per-CPU GDT with the boot GDT, in cpu_init()
2259 memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); in cpu_init()
2270 cur->active_mm = &init_mm; in cpu_init()
2271 BUG_ON(cur->mm); in cpu_init()
2300 * Relies on the BP having set-up the IDT tables, which are loaded in cpu_init_secondary()
2336 …pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS… in microcode_check()