Lines Matching +full:ipa +full:- +full:reg

1 // SPDX-License-Identifier: GPL-2.0-only
9 * there's a little bit of over-abstraction that tends to obscure what's going
14 * user-visible instructions are available only on a subset of the available
18 * snapshot state to indicate the lowest common denominator of the feature,
31 * - Mismatched features are *always* sanitised to a "safe" value, which
34 * - A mismatched feature marked with FTR_STRICT will cause a "SANITY CHECK"
38 * - Features marked as FTR_VISIBLE have their sanitised value visible to
43 * - A "feature" is typically a 4-bit register field. A "capability" is the
44 * high-level description derived from the sanitised field value.
46 * - Read the Arm ARM (DDI 0487F.a) section D13.1.3 ("Principles of the ID
50 * - KVM exposes its own view of the feature registers to guest operating
57 * - If the arm64_ftr_bits[] for a register has a missing field, then this
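
The header comment above describes the core contract: each ID register is carved into (typically 4-bit) fields, and a mismatched field is sanitised to a safe, lowest-common-denominator value. A minimal userspace sketch of that policy for an unsigned FTR_LOWER_SAFE field (illustrative only, not kernel code; register values are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* ID register fields are 4 bits wide. */
    static unsigned int extract_field(uint64_t reg, unsigned int shift)
    {
            return (reg >> shift) & 0xf;
    }

    int main(void)
    {
            uint64_t cpu0_reg = 0x21, cpu1_reg = 0x11;      /* hypothetical */
            unsigned int f0 = extract_field(cpu0_reg, 4);
            unsigned int f1 = extract_field(cpu1_reg, 4);

            /* FTR_LOWER_SAFE: the system-wide value is the lower one. */
            printf("sanitised field: %u\n", f0 < f1 ? f0 : f1);
            return 0;
    }
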
125 /* file-wide pr_fmt adds "CPU features: " prefix */ in dump_cpu_features()
266 * Page size not being supported at Stage-2 is not fatal. You
271 * advertises a given granule size at Stage-2 (value 2) on some
272 * vCPUs, and uses the fallback to Stage-1 (value 0) for other
288 * along with it and treat them as non-strict.
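
The fragment above explains why the Stage-2 granule fields can be treated as non-strict: a field value of 0 means "consult the Stage-1 field instead", so a mismatch is survivable. A sketch of that resolution, assuming the 4K-granule encodings (2 = supported at Stage-2; 0xf = not supported at Stage-1); both encodings follow the ID_AA64MMFR0_EL1 layout and should be double-checked against the Arm ARM:

    /* Returns non-zero if the granule is usable at Stage-2. */
    static int stage2_granule_supported(unsigned int tgran_2,
                                        unsigned int tgran_1)
    {
            if (tgran_2 == 0)               /* fall back to Stage-1 field */
                    return tgran_1 != 0xf;
            return tgran_2 >= 2;            /* 2 = advertised at Stage-2 */
    }
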
348 * Linux can handle differing I-cache policies. Userspace JITs will
350 * If we have differing I-cache policies, report it as the weakest - VIPT.
525 * id_isar[1-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
551 .reg = &(struct arm64_ftr_reg){ \
558 struct arm64_ftr_reg *reg; member
619 return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id; in search_cmp_ftr_reg()
623 * get_arm64_ftr_reg_nowarn - Looks up a feature register entry using
628 * returns - Upon success, matching ftr_reg entry for id.
629 * - NULL on failure. It is up to the caller to decide
642 return ret->reg; in get_arm64_ftr_reg_nowarn()
647 * get_arm64_ftr_reg - Looks up a feature register entry using
650 * returns - Upon success, matching ftr_reg entry for id.
651 * - NULL on failure but with a WARN_ON().
655 struct arm64_ftr_reg *reg; in get_arm64_ftr_reg() local
657 reg = get_arm64_ftr_reg_nowarn(sys_id); in get_arm64_ftr_reg()
660 * Requesting a non-existent register is an error. Warn in get_arm64_ftr_reg()
663 WARN_ON(!reg); in get_arm64_ftr_reg()
664 return reg; in get_arm64_ftr_reg()
667 static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg, in arm64_ftr_set_value() argument
672 reg &= ~mask; in arm64_ftr_set_value()
673 reg |= (ftr_val << ftrp->shift) & mask; in arm64_ftr_set_value()
674 return reg; in arm64_ftr_set_value()
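
arm64_ftr_set_value() above is a plain mask-and-insert; arm64_ftr_mask() (not shown in this listing, but presumably the width-bit mask at the field's shift) supplies the mask. A standalone equivalent for reference:

    #include <stdint.h>

    static uint64_t set_field(uint64_t reg, unsigned int shift,
                              unsigned int width, uint64_t val)
    {
            uint64_t mask = ((1ULL << width) - 1) << shift;

            reg &= ~mask;                   /* clear the old field */
            reg |= (val << shift) & mask;   /* insert the new value */
            return reg;
    }
    /* e.g. set_field(0, 4, 4, 0x2) == 0x20 */
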
682 switch (ftrp->type) { in arm64_ftr_safe_value()
684 ret = ftrp->safe_val; in arm64_ftr_safe_value()
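
Only the FTR_EXACT arm of arm64_ftr_safe_value() is visible above: on mismatch it returns the table's fixed safe value, which is exactly how differing I-cache policies end up reported as the weakest option (VIPT) per the earlier comment. A sketch of the wider policy, with the non-EXACT arms inferred from the sanitisation rules in the header comment:

    enum ftr_type { FTR_EXACT, FTR_LOWER_SAFE, FTR_HIGHER_SAFE };

    static long safe_value(enum ftr_type type, long safe_val,
                           long new, long cur)
    {
            switch (type) {
            case FTR_EXACT:         /* any mismatch -> fixed safe value */
                    return safe_val;
            case FTR_LOWER_SAFE:    /* the lower value is safer */
                    return new < cur ? new : cur;
            case FTR_HIGHER_SAFE:   /* the higher value is safer */
                    return new > cur ? new : cur;
            }
            return safe_val;
    }
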
708 const struct arm64_ftr_reg *ftr_reg = arm64_ftr_regs[i].reg; in sort_ftr_regs()
709 const struct arm64_ftr_bits *ftr_bits = ftr_reg->ftr_bits; in sort_ftr_regs()
716 for (; ftr_bits->width != 0; ftr_bits++, j++) { in sort_ftr_regs()
717 unsigned int width = ftr_reg->ftr_bits[j].width; in sort_ftr_regs()
718 unsigned int shift = ftr_reg->ftr_bits[j].shift; in sort_ftr_regs()
723 ftr_reg->name, shift); in sort_ftr_regs()
732 prev_shift = ftr_reg->ftr_bits[j - 1].shift; in sort_ftr_regs()
735 ftr_reg->name, shift); in sort_ftr_regs()
749 BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id); in sort_ftr_regs()
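
sort_ftr_regs() above enforces three boot-time invariants: every arm64_ftr_bits[] table is in descending shift order, its fields do not overlap, and the outer register table is sorted by sys_id. A hypothetical per-register table that passes those checks, written with this file's own macros (the shifts, widths, and safe values are invented):

    static const struct arm64_ftr_bits ftr_example[] = {
            ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
            ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE,  8, 4, 0),
            ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE,  0, 4, 0),
            ARM64_FTR_END,
    };
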
757 * RES0 for the system-wide value, and must strictly match.
767 struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg); in init_cpu_ftr_reg() local
769 if (!reg) in init_cpu_ftr_reg()
772 for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { in init_cpu_ftr_reg()
779 if (!ftrp->strict) in init_cpu_ftr_reg()
781 if (ftrp->visible) in init_cpu_ftr_reg()
784 reg->user_val = arm64_ftr_set_value(ftrp, in init_cpu_ftr_reg()
785 reg->user_val, in init_cpu_ftr_reg()
786 ftrp->safe_val); in init_cpu_ftr_reg()
791 reg->sys_val = val; in init_cpu_ftr_reg()
792 reg->strict_mask = strict_mask; in init_cpu_ftr_reg()
793 reg->user_mask = user_mask; in init_cpu_ftr_reg()
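
init_cpu_ftr_reg() above derives the register's bookkeeping in one pass: non-strict fields are removed from strict_mask (so SANITY checks ignore them), visible fields are added to user_mask, and hidden fields get their safe value patched into user_val. A standalone sketch of the mask side of that loop (the field layout is hypothetical):

    #include <stddef.h>
    #include <stdint.h>

    struct field { unsigned int shift, width; int strict, visible; };

    static void build_masks(const struct field *f, size_t n,
                            uint64_t *strict_mask, uint64_t *user_mask)
    {
            *strict_mask = ~0ULL;
            *user_mask = 0;
            for (size_t i = 0; i < n; i++) {
                    uint64_t mask = ((1ULL << f[i].width) - 1) << f[i].shift;

                    if (!f[i].strict)
                            *strict_mask &= ~mask;  /* skip SANITY check */
                    if (f[i].visible)
                            *user_mask |= mask;     /* expose to userspace */
            }
    }
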
802 for (; caps->matches; caps++) { in init_cpu_hwcaps_indirect_list_from_array()
803 if (WARN(caps->capability >= ARM64_NCAPS, in init_cpu_hwcaps_indirect_list_from_array()
804 "Invalid capability %d\n", caps->capability)) in init_cpu_hwcaps_indirect_list_from_array()
806 if (WARN(cpu_hwcaps_ptrs[caps->capability], in init_cpu_hwcaps_indirect_list_from_array()
808 caps->capability)) in init_cpu_hwcaps_indirect_list_from_array()
810 cpu_hwcaps_ptrs[caps->capability] = caps; in init_cpu_hwcaps_indirect_list_from_array()
827 init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr); in init_cpu_features()
828 init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid); in init_cpu_features()
829 init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq); in init_cpu_features()
830 init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0); in init_cpu_features()
831 init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1); in init_cpu_features()
832 init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0); in init_cpu_features()
833 init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1); in init_cpu_features()
834 init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0); in init_cpu_features()
835 init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1); in init_cpu_features()
836 init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2); in init_cpu_features()
837 init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0); in init_cpu_features()
838 init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1); in init_cpu_features()
839 init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0); in init_cpu_features()
841 if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { in init_cpu_features()
842 init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0); in init_cpu_features()
843 init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1); in init_cpu_features()
844 init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0); in init_cpu_features()
845 init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1); in init_cpu_features()
846 init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2); in init_cpu_features()
847 init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3); in init_cpu_features()
848 init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4); in init_cpu_features()
849 init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5); in init_cpu_features()
850 init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6); in init_cpu_features()
851 init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0); in init_cpu_features()
852 init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1); in init_cpu_features()
853 init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2); in init_cpu_features()
854 init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3); in init_cpu_features()
855 init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4); in init_cpu_features()
856 init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5); in init_cpu_features()
857 init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0); in init_cpu_features()
858 init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1); in init_cpu_features()
859 init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2); in init_cpu_features()
860 init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0); in init_cpu_features()
861 init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1); in init_cpu_features()
862 init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2); in init_cpu_features()
865 if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) { in init_cpu_features()
866 init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr); in init_cpu_features()
883 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new) in update_cpu_ftr_reg() argument
887 for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { in update_cpu_ftr_reg()
888 s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val); in update_cpu_ftr_reg()
895 reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new); in update_cpu_ftr_reg()
908 if ((boot & regp->strict_mask) == (val & regp->strict_mask)) in check_update_ftr_reg()
911 regp->name, boot, cpu, val); in check_update_ftr_reg()
923 for (ftrp = regp->ftr_bits; ftrp->width; ftrp++) { in relax_cpu_ftr_reg()
924 if (ftrp->shift == field) { in relax_cpu_ftr_reg()
925 regp->strict_mask &= ~arm64_ftr_mask(ftrp); in relax_cpu_ftr_reg()
931 WARN_ON(!ftrp->width); in relax_cpu_ftr_reg()
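
relax_cpu_ftr_reg() above removes a single field from a register's strict_mask so that a mismatch there no longer trips the SANITY checks (the WARN_ON fires if the requested field doesn't exist). A hypothetical call site, with the register and field chosen purely for illustration:

    relax_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL0_SHIFT);
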
950 * EL1-dependent register fields to avoid spurious sanity check failures. in update_32bit_cpu_features()
962 info->reg_id_dfr0, boot->reg_id_dfr0); in update_32bit_cpu_features()
964 info->reg_id_dfr1, boot->reg_id_dfr1); in update_32bit_cpu_features()
966 info->reg_id_isar0, boot->reg_id_isar0); in update_32bit_cpu_features()
968 info->reg_id_isar1, boot->reg_id_isar1); in update_32bit_cpu_features()
970 info->reg_id_isar2, boot->reg_id_isar2); in update_32bit_cpu_features()
972 info->reg_id_isar3, boot->reg_id_isar3); in update_32bit_cpu_features()
974 info->reg_id_isar4, boot->reg_id_isar4); in update_32bit_cpu_features()
976 info->reg_id_isar5, boot->reg_id_isar5); in update_32bit_cpu_features()
978 info->reg_id_isar6, boot->reg_id_isar6); in update_32bit_cpu_features()
986 info->reg_id_mmfr0, boot->reg_id_mmfr0); in update_32bit_cpu_features()
988 info->reg_id_mmfr1, boot->reg_id_mmfr1); in update_32bit_cpu_features()
990 info->reg_id_mmfr2, boot->reg_id_mmfr2); in update_32bit_cpu_features()
992 info->reg_id_mmfr3, boot->reg_id_mmfr3); in update_32bit_cpu_features()
994 info->reg_id_mmfr4, boot->reg_id_mmfr4); in update_32bit_cpu_features()
996 info->reg_id_mmfr5, boot->reg_id_mmfr5); in update_32bit_cpu_features()
998 info->reg_id_pfr0, boot->reg_id_pfr0); in update_32bit_cpu_features()
1000 info->reg_id_pfr1, boot->reg_id_pfr1); in update_32bit_cpu_features()
1002 info->reg_id_pfr2, boot->reg_id_pfr2); in update_32bit_cpu_features()
1004 info->reg_mvfr0, boot->reg_mvfr0); in update_32bit_cpu_features()
1006 info->reg_mvfr1, boot->reg_mvfr1); in update_32bit_cpu_features()
1008 info->reg_mvfr2, boot->reg_mvfr2); in update_32bit_cpu_features()
1015 * non-boot CPU. Also performs SANITY checks to make sure that there
1025 * The kernel can handle differing I-cache policies, but otherwise in update_cpu_features()
1030 info->reg_ctr, boot->reg_ctr); in update_cpu_features()
1038 info->reg_dczid, boot->reg_dczid); in update_cpu_features()
1042 info->reg_cntfrq, boot->reg_cntfrq); in update_cpu_features()
1045 * The kernel uses self-hosted debug features and expects CPUs to in update_cpu_features()
1051 info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0); in update_cpu_features()
1053 info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1); in update_cpu_features()
1055 * Even in big.LITTLE, processors should be identical instruction-set in update_cpu_features()
1059 info->reg_id_aa64isar0, boot->reg_id_aa64isar0); in update_cpu_features()
1061 info->reg_id_aa64isar1, boot->reg_id_aa64isar1); in update_cpu_features()
1069 info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0); in update_cpu_features()
1071 info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1); in update_cpu_features()
1073 info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2); in update_cpu_features()
1076 info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0); in update_cpu_features()
1078 info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1); in update_cpu_features()
1081 info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0); in update_cpu_features()
1083 if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) { in update_cpu_features()
1085 info->reg_zcr, boot->reg_zcr); in update_cpu_features()
1115 return regp->sys_val; in read_sanitised_ftr_reg()
1123 * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
1172 #include <linux/irqchip/arm-gic-v3.h>
1175 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) in feature_matches() argument
1177 int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign); in feature_matches()
1179 return val >= entry->min_field_value; in feature_matches()
1189 val = read_sanitised_ftr_reg(entry->sys_reg); in has_cpuid_feature()
1191 val = __read_sysreg_by_encoding(entry->sys_reg); in has_cpuid_feature()
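
Condensing feature_matches() and has_cpuid_feature() above: the scope selects which copy of the register to test (this CPU's own, or the system-wide sanitised one), then the extracted field is compared against the entry's minimum. A restatement of that flow using only identifiers visible in this listing:

    static bool has_feature(const struct arm64_cpu_capabilities *entry,
                            int scope)
    {
            u64 val;

            if (scope == SCOPE_LOCAL_CPU)
                    val = __read_sysreg_by_encoding(entry->sys_reg);
            else
                    val = read_sanitised_ftr_reg(entry->sys_reg);

            return cpuid_feature_extract_field(val, entry->field_pos,
                                               entry->sign) >= entry->min_field_value;
    }
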
1206 entry->desc); in has_useable_gicv3_cpuif()
1271 * Kdump isn't guaranteed to power off all secondary CPUs, CNP in has_useable_cnp()
1284 * CPU to detect the need for non-global mappings and thus avoiding a
1285 * pagetable re-write after all the CPUs are booted. This check will be
1287 * state once the SMP CPUs are up and thus make the switch to non-global
1360 * ThunderX leads to apparent I-cache corruption of kernel text, which in unmap_kernel_at_el0()
1365 __kpti_forced = -1; in unmap_kernel_at_el0()
1378 __kpti_forced = -1; in unmap_kernel_at_el0()
1407 * We don't need to rewrite the page-tables if either we've done in kpti_install_ng_mappings()
1440 __kpti_forced = enabled ? 1 : -1; in parse_kpti()
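
For context: parse_kpti() is registered as an early kernel parameter, so KPTI can be forced on or off from the command line (typically kpti=1 or kpti=0, assuming the usual boolean parsing); the 1/-1 values of __kpti_forced record that override, with 0 meaning "no override".
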
1460 MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 - r1p0 in cpu_has_broken_dbm()
1487 * DBM is a non-conflicting feature. i.e., the kernel can safely in has_hw_dbm()
1497 * This is safe as all CPUs (including secondary CPUs - due to the in has_hw_dbm()
1498 * LOCAL_CPU scope - and the hotplugged CPUs - via verification) in has_hw_dbm()
1548 * The AMU extension is a non-conflicting feature: the kernel can in has_amu()
1577 * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to in cpu_copy_el2regs()
1623 * The ptr-auth feature levels are not intercompatible with lower in has_address_auth_cpucap()
1624 * levels. Hence we must match ptr-auth feature level of the secondary in has_address_auth_cpucap()
1632 boot_val = cpuid_feature_extract_field(read_sanitised_ftr_reg(entry->sys_reg), in has_address_auth_cpucap()
1633 entry->field_pos, entry->sign); in has_address_auth_cpucap()
1635 return boot_val >= entry->min_field_value; in has_address_auth_cpucap()
1637 sec_val = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg), in has_address_auth_cpucap()
1638 entry->field_pos, entry->sign); in has_address_auth_cpucap()
1685 * Use of X16/X17 for tail-calls and trampolines that jump to in bti_enable()
1716 return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU); in cpucap_late_cpu_optional()
1722 return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU); in cpucap_late_cpu_permitted()
1728 return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT); in cpucap_panic_on_conflict()
1805 .desc = "32-bit EL0 Support",
1816 .desc = "32-bit EL1 Support",
1927 .desc = "Stage-2 Force Write-Back",
2144 #define HWCAP_CPUID_MATCH(reg, field, s, min_value) \ argument
2146 .sys_reg = reg, \
2157 #define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \ argument
2160 HWCAP_CPUID_MATCH(reg, field, s, min_value) \
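
The two macros above stamp out entries for the ELF hwcap table: HWCAP_CPUID_MATCH() builds the ID-register test and HWCAP_CAP() pairs it with the hwcap to set. A representative entry, shown to illustrate the parameter order (this is how the PMULL hwcap is conventionally derived in this file, but treat the exact constants as illustrative):

    /* Set HWCAP_PMULL when ID_AA64ISAR0_EL1.AES >= 2 on every CPU. */
    HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED,
              2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
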
2269 * check is future-proof by making sure the value is non-zero. in compat_has_neon()
2303 switch (cap->hwcap_type) { in cap_set_elf_hwcap()
2305 cpu_set_feature(cap->hwcap); in cap_set_elf_hwcap()
2309 compat_elf_hwcap |= (u32)cap->hwcap; in cap_set_elf_hwcap()
2312 compat_elf_hwcap2 |= (u32)cap->hwcap; in cap_set_elf_hwcap()
2326 switch (cap->hwcap_type) { in cpus_have_elf_hwcap()
2328 rc = cpu_have_feature(cap->hwcap); in cpus_have_elf_hwcap()
2332 rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0; in cpus_have_elf_hwcap()
2335 rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0; in cpus_have_elf_hwcap()
2350 for (; hwcaps->matches; hwcaps++) in setup_elf_hwcaps()
2351 if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps))) in setup_elf_hwcaps()
2363 if (!caps || !(caps->type & scope_mask) || in update_cpu_capabilities()
2364 cpus_have_cap(caps->capability) || in update_cpu_capabilities()
2365 !caps->matches(caps, cpucap_default_scope(caps))) in update_cpu_capabilities()
2368 if (caps->desc) in update_cpu_capabilities()
2369 pr_info("detected: %s\n", caps->desc); in update_cpu_capabilities()
2370 cpus_set_cap(caps->capability); in update_cpu_capabilities()
2372 if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU)) in update_cpu_capabilities()
2373 set_bit(caps->capability, boot_capabilities); in update_cpu_capabilities()
2392 if (!(cap->type & non_boot_scope)) in cpu_enable_non_boot_scope_capabilities()
2395 if (cap->cpu_enable) in cpu_enable_non_boot_scope_capabilities()
2396 cap->cpu_enable(cap); in cpu_enable_non_boot_scope_capabilities()
2418 if (!caps || !(caps->type & scope_mask)) in enable_cpu_capabilities()
2420 num = caps->capability; in enable_cpu_capabilities()
2427 if (boot_scope && caps->cpu_enable) in enable_cpu_capabilities()
2437 caps->cpu_enable(caps); in enable_cpu_capabilities()
2441 * For all non-boot scope capabilities, use stop_machine() in enable_cpu_capabilities()
2466 if (!caps || !(caps->type & scope_mask)) in verify_local_cpu_caps()
2469 cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU); in verify_local_cpu_caps()
2470 system_has_cap = cpus_have_cap(caps->capability); in verify_local_cpu_caps()
2485 if (caps->cpu_enable) in verify_local_cpu_caps()
2486 caps->cpu_enable(caps); in verify_local_cpu_caps()
2499 smp_processor_id(), caps->capability, in verify_local_cpu_caps()
2500 caps->desc, system_has_cap, cpu_has_cap); in verify_local_cpu_caps()
2524 for (; caps->matches; caps++) in verify_local_elf_hwcaps()
2525 if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) { in verify_local_elf_hwcaps()
2527 smp_processor_id(), caps->desc); in verify_local_elf_hwcaps()
2570 /* Verify IPA range */ in verify_hyp_capabilities()
2575 pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id()); in verify_hyp_capabilities()
2643 return cap->matches(cap, SCOPE_LOCAL_CPU); in this_cpu_has_cap()
2651 * - The system wide safe registers are set with all the SMP CPUs and,
2652 * - The SYSTEM_FEATURE cpu_hwcaps may not have been set.
2661 return cap->matches(cap, SCOPE_SYSTEM); in __system_matches_cap()
2698 * We have finalised the system-wide safe feature in setup_system_capabilities()
2748 * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
2749 * See Table C5-6 System instruction encodings for System register accesses,
2762 * With CRm == 0, reg should be one of:
2779 return -EINVAL; in emulate_id_reg()
2790 return -EINVAL; in emulate_sys_reg()
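
The encoding rules quoted above (Op0 = 0x3, Op1 = 0x0, CRn = 0x0, CRm in {0, 4-7}) bound the space of ID registers whose MRS reads the kernel will emulate for userspace. A standalone sketch of that test (illustrative; the real check in this file may be structured differently):

    static int is_emulated_id_reg(unsigned int op0, unsigned int op1,
                                  unsigned int crn, unsigned int crm)
    {
            return op0 == 3 && op1 == 0 && crn == 0 &&
                   (crm == 0 || (crm >= 4 && crm <= 7));
    }
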