/*
 * VGIC system registers handling functions for AArch64 mode
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include "vgic.h"
#include "sys_regs.h"

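/*
 * ICC_CTLR_EL1 accessor. Writes (typically a VM state restore from
 * userspace) are rejected when they claim more priority or interrupt ID
 * bits than the host GIC provides, or when the read-only SEIS/A3V bits
 * do not match the host's ICH_VTR_EL2 values; only the CBPR and EOImode
 * fields are propagated into the VMCR. Reads assemble the value from the
 * per-vcpu limits, the host ICH_VTR_EL2 and the current VMCR.
 */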
static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	u32 host_pri_bits, host_id_bits, host_seis, host_a3v, seis, a3v;
	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_vmcr vmcr;
	u64 val;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		val = p->regval;

		/*
		 * Disallow restoring VM state if not supported by this
		 * hardware.
		 */
		host_pri_bits = ((val & ICC_CTLR_EL1_PRI_BITS_MASK) >>
				 ICC_CTLR_EL1_PRI_BITS_SHIFT) + 1;
		if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
			return false;

		vgic_v3_cpu->num_pri_bits = host_pri_bits;

		host_id_bits = (val & ICC_CTLR_EL1_ID_BITS_MASK) >>
				ICC_CTLR_EL1_ID_BITS_SHIFT;
		if (host_id_bits > vgic_v3_cpu->num_id_bits)
			return false;

		vgic_v3_cpu->num_id_bits = host_id_bits;

		host_seis = ((kvm_vgic_global_state.ich_vtr_el2 &
			      ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT);
		seis = (val & ICC_CTLR_EL1_SEIS_MASK) >>
			ICC_CTLR_EL1_SEIS_SHIFT;
		if (host_seis != seis)
			return false;

		host_a3v = ((kvm_vgic_global_state.ich_vtr_el2 &
			     ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT);
		a3v = (val & ICC_CTLR_EL1_A3V_MASK) >> ICC_CTLR_EL1_A3V_SHIFT;
		if (host_a3v != a3v)
			return false;

		/*
		 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
		 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
		 */
		vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
		vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		val = 0;
		val |= (vgic_v3_cpu->num_pri_bits - 1) <<
			ICC_CTLR_EL1_PRI_BITS_SHIFT;
		val |= vgic_v3_cpu->num_id_bits << ICC_CTLR_EL1_ID_BITS_SHIFT;
		val |= ((kvm_vgic_global_state.ich_vtr_el2 &
			 ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT) <<
			ICC_CTLR_EL1_SEIS_SHIFT;
		val |= ((kvm_vgic_global_state.ich_vtr_el2 &
			 ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT) <<
			ICC_CTLR_EL1_A3V_SHIFT;
		/*
		 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
		 * Extract it directly using ICC_CTLR_EL1 reg definitions.
		 */
		val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
		val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;

		p->regval = val;
	}

	return true;
}

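/*
 * ICC_PMR_EL1 accessor: the priority mask lives in the VMCR, so reads and
 * writes are a straight extract/insert of the PMR field. The BPR0,
 * IGRPEN0 and IGRPEN1 accessors below follow the same single-field
 * pattern.
 */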
static bool access_gic_pmr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		vmcr.pmr = (p->regval & ICC_PMR_EL1_MASK) >> ICC_PMR_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		p->regval = (vmcr.pmr << ICC_PMR_EL1_SHIFT) & ICC_PMR_EL1_MASK;
	}

	return true;
}

static bool access_gic_bpr0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		vmcr.bpr = (p->regval & ICC_BPR0_EL1_MASK) >>
			   ICC_BPR0_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		p->regval = (vmcr.bpr << ICC_BPR0_EL1_SHIFT) &
			    ICC_BPR0_EL1_MASK;
	}

	return true;
}

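/*
 * ICC_BPR1_EL1 accessor. While ICC_CTLR_EL1.CBPR is clear, BPR1 is a real
 * register backed by VMCR.ABPR. When CBPR is set, BPR1 reads as BPR0 plus
 * one (saturated at 7) and writes are ignored, matching the architected
 * aliasing behaviour.
 */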
static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	if (!p->is_write)
		p->regval = 0;

	vgic_get_vmcr(vcpu, &vmcr);
	if (!vmcr.cbpr) {
		if (p->is_write) {
			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
				     ICC_BPR1_EL1_SHIFT;
			vgic_set_vmcr(vcpu, &vmcr);
		} else {
			p->regval = (vmcr.abpr << ICC_BPR1_EL1_SHIFT) &
				     ICC_BPR1_EL1_MASK;
		}
	} else {
		if (!p->is_write)
			p->regval = min((vmcr.bpr + 1), 7U);
	}

	return true;
}

static bool access_gic_grpen0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		vmcr.grpen0 = (p->regval & ICC_IGRPEN0_EL1_MASK) >>
			      ICC_IGRPEN0_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		p->regval = (vmcr.grpen0 << ICC_IGRPEN0_EL1_SHIFT) &
			    ICC_IGRPEN0_EL1_MASK;
	}

	return true;
}

static bool access_gic_grpen1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		vmcr.grpen1 = (p->regval & ICC_IGRPEN1_EL1_MASK) >>
			      ICC_IGRPEN1_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		p->regval = (vmcr.grpen1 << ICC_IGRPEN1_EL1_SHIFT) &
			    ICC_IGRPEN1_EL1_MASK;
	}

	return true;
}

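/*
 * Common helper for the active priority registers: pick the AP0Rn or
 * AP1Rn backing word in the shadow ICH state and copy it to or from the
 * trap parameters.
 */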
static void vgic_v3_access_apr_reg(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p, u8 apr, u8 idx)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
	uint32_t *ap_reg;

	if (apr)
		ap_reg = &vgicv3->vgic_ap1r[idx];
	else
		ap_reg = &vgicv3->vgic_ap0r[idx];

	if (p->is_write)
		*ap_reg = p->regval;
	else
		p->regval = *ap_reg;
}

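/*
 * ICC_AP0Rn/ICC_AP1Rn accessor. The register index comes from Op2;
 * indices beyond what the virtual GIC implements (as reported by
 * vgic_v3_max_apr_idx()) read as zero and fail the access.
 */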
static bool access_gic_aprn(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r, u8 apr)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		goto err;

	vgic_v3_access_apr_reg(vcpu, p, apr, idx);
	return true;
err:
	if (!p->is_write)
		p->regval = 0;

	return false;
}

static bool access_gic_ap0r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	return access_gic_aprn(vcpu, p, r, 0);
}

static bool access_gic_ap1r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	return access_gic_aprn(vcpu, p, r, 1);
}

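/*
 * ICC_SRE_EL1 accessor: the guest always runs with the system register
 * interface enabled, so writes that attempt to clear the SRE bit are
 * refused and reads simply return the cached vgic_sre value.
 */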
static bool access_gic_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/* Validate SRE bit */
	if (p->is_write) {
		if (!(p->regval & ICC_SRE_EL1_SRE))
			return false;
	} else {
		p->regval = vgicv3->vgic_sre;
	}

	return true;
}

static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
	{ SYS_DESC(SYS_ICC_PMR_EL1), access_gic_pmr },
	{ SYS_DESC(SYS_ICC_BPR0_EL1), access_gic_bpr0 },
	{ SYS_DESC(SYS_ICC_AP0R0_EL1), access_gic_ap0r },
	{ SYS_DESC(SYS_ICC_AP0R1_EL1), access_gic_ap0r },
	{ SYS_DESC(SYS_ICC_AP0R2_EL1), access_gic_ap0r },
	{ SYS_DESC(SYS_ICC_AP0R3_EL1), access_gic_ap0r },
	{ SYS_DESC(SYS_ICC_AP1R0_EL1), access_gic_ap1r },
	{ SYS_DESC(SYS_ICC_AP1R1_EL1), access_gic_ap1r },
	{ SYS_DESC(SYS_ICC_AP1R2_EL1), access_gic_ap1r },
	{ SYS_DESC(SYS_ICC_AP1R3_EL1), access_gic_ap1r },
	{ SYS_DESC(SYS_ICC_BPR1_EL1), access_gic_bpr1 },
	{ SYS_DESC(SYS_ICC_CTLR_EL1), access_gic_ctlr },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
	{ SYS_DESC(SYS_ICC_IGRPEN0_EL1), access_gic_grpen0 },
	{ SYS_DESC(SYS_ICC_IGRPEN1_EL1), access_gic_grpen1 },
};

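/*
 * Check whether the sysreg id taken from a device attribute matches one
 * of the ICC registers handled above. Used when userspace probes which
 * GIC CPU interface registers are accessible.
 */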
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
				 u64 *reg)
{
	struct sys_reg_params params;
	u64 sysreg = (id & KVM_DEV_ARM_VGIC_SYSREG_MASK) | KVM_REG_SIZE_U64;

	params.regval = *reg;
	params.is_write = is_write;
	params.is_aarch32 = false;
	params.is_32bit = false;

	if (find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
			   ARRAY_SIZE(gic_v3_icc_reg_descs)))
		return 0;

	return -ENXIO;
}

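/*
 * Userspace get/set of a GIC CPU interface register: look up the
 * descriptor for the requested sysreg and run its access handler with
 * the value from *reg (for a write), or store the handler's result back
 * into *reg (for a read).
 */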
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write, u64 id,
				u64 *reg)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	u64 sysreg = (id & KVM_DEV_ARM_VGIC_SYSREG_MASK) | KVM_REG_SIZE_U64;

	if (is_write)
		params.regval = *reg;
	params.is_write = is_write;
	params.is_aarch32 = false;
	params.is_32bit = false;

	r = find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
			   ARRAY_SIZE(gic_v3_icc_reg_descs));
	if (!r)
		return -ENXIO;

	if (!r->access(vcpu, &params, r))
		return -EINVAL;

	if (!is_write)
		*reg = params.regval;

	return 0;
}