// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <nvhe/fixed_config.h>
#include <nvhe/trap_handler.h>

/*
 * Set trap register values based on features in ID_AA64PFR0.
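 *
 * RAS, AMU and SVE accesses are trapped if the protected VM's view of
 * ID_AA64PFR0_EL1 does not advertise the corresponding feature.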
 */
static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
	u64 hcr_set = HCR_RW;
	u64 hcr_clear = 0;
	u64 cptr_set = 0;

	/* Protected KVM does not support AArch32 guests. */
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

	/*
	 * Linux guests assume support for floating-point and Advanced SIMD. Do
	 * not change the trapping behavior for these from the KVM default.
	 */
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
				PVM_ID_AA64PFR0_ALLOW));
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
				PVM_ID_AA64PFR0_ALLOW));

	/* Trap RAS unless all current versions are supported */
	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
	    ID_AA64PFR0_EL1_RAS_V1P1) {
		hcr_set |= HCR_TERR | HCR_TEA;
		hcr_clear |= HCR_FIEN;
	}

	/*
	 * Trap AMU: CPTR_EL2.TAM traps Activity Monitor register accesses,
	 * and clearing HCR_EL2.AMVOFFEN disables the virtual counter offsets.
	 */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
		hcr_clear |= HCR_AMVOFFEN;
		cptr_set |= CPTR_EL2_TAM;
	}

	/* Trap SVE: CPTR_EL2.TZ traps SVE instructions and register accesses */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids))
		cptr_set |= CPTR_EL2_TZ;

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
	vcpu->arch.cptr_el2 |= cptr_set;
}

/*
 * Set trap register values based on features in ID_AA64PFR1.
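 *
 * Memory Tagging Extension (MTE) accesses are trapped if the protected
 * VM's view of ID_AA64PFR1_EL1 does not advertise MTE.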
 */
static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
	u64 hcr_set = 0;
	u64 hcr_clear = 0;

	/* Memory Tagging: Trap and Treat as Untagged if not supported. */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
		hcr_set |= HCR_TID5;
		hcr_clear |= HCR_DCT | HCR_ATA;
	}

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
}

/*
 * Set trap register values based on features in ID_AA64DFR0.
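 *
 * PMU, debug, OS double lock, SPE, trace filter and trace accesses are
 * trapped if the protected VM's view of ID_AA64DFR0_EL1 does not
 * advertise the corresponding feature.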
 */
static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
	u64 mdcr_set = 0;
	u64 mdcr_clear = 0;
	u64 cptr_set = 0;

	/*
	 * Trap/constrain PMU: MDCR_EL2.TPM/TPMCR trap PMU register accesses,
	 * and the counter enable and partition bits (HPME, MTPME, HPMN) are
	 * cleared.
	 */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
		mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
			      MDCR_EL2_HPMN_MASK;
	}

	/* Trap Debug */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
		mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;

	/* Trap OS Double Lock */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
		mdcr_set |= MDCR_EL2_TDOSA;

	/* Trap SPE */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPMS;
		mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
	}

	/* Trap Trace Filter */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
		mdcr_set |= MDCR_EL2_TTRF;

	/* Trap Trace */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids))
		cptr_set |= CPTR_EL2_TTA;

	vcpu->arch.mdcr_el2 |= mdcr_set;
	vcpu->arch.mdcr_el2 &= ~mdcr_clear;
	vcpu->arch.cptr_el2 |= cptr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR0.
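 *
 * Debug Communications Channel accesses are trapped if the protected
 * VM's view of ID_AA64MMFR0_EL1 does not advertise fine-grained traps
 * (FEAT_FGT), which MDCR_EL2.TDCC is part of.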
 */
static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
	u64 mdcr_set = 0;

	/* Trap Debug Communications Channel registers */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
		mdcr_set |= MDCR_EL2_TDCC;

	vcpu->arch.mdcr_el2 |= mdcr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR1.
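 *
 * LORegion register accesses are trapped if the protected VM's view of
 * ID_AA64MMFR1_EL1 does not advertise FEAT_LOR.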
 */
static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
	u64 hcr_set = 0;

	/* Trap LOR */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
		hcr_set |= HCR_TLOR;

	vcpu->arch.hcr_el2 |= hcr_set;
}

/*
 * Set baseline trap register values.
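 *
 * These traps are applied to every protected vCPU, regardless of which
 * features its ID registers advertise.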
 */
static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
{
	const u64 hcr_trap_feat_regs = HCR_TID3;
	const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1;

	/*
	 * Always trap:
	 * - Feature id registers: to control features exposed to guests
	 * - Implementation-defined features
	 */
	vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef;

	/* Clear res0 and set res1 bits to trap potential new features. */
	vcpu->arch.hcr_el2 &= ~(HCR_RES0);
	vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
	vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
	vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
}

/*
 * Initialize trap register values for protected VMs.
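 *
 * The baseline traps are set first; the remaining traps are then adjusted
 * according to the VM's restricted view of the feature ID registers.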
 */
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
{
	pvm_init_trap_regs(vcpu);
	pvm_init_traps_aa64pfr0(vcpu);
	pvm_init_traps_aa64pfr1(vcpu);
	pvm_init_traps_aa64dfr0(vcpu);
	pvm_init_traps_aa64mmfr0(vcpu);
	pvm_init_traps_aa64mmfr1(vcpu);
}