/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_EMULATE_H__
#define __ARM_KVM_EMULATE_H__

#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_arm.h>
#include <asm/cputype.h>

/* arm64 compatibility macros: map the arm64 PSR_AA32_* names to the ARM PSR definitions */
#define PSR_AA32_MODE_ABT	ABT_MODE
#define PSR_AA32_MODE_UND	UND_MODE
#define PSR_AA32_T_BIT		PSR_T_BIT
#define PSR_AA32_I_BIT		PSR_I_BIT
#define PSR_AA32_A_BIT		PSR_A_BIT
#define PSR_AA32_E_BIT		PSR_E_BIT
#define PSR_AA32_IT_MASK	PSR_IT_MASK

unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);

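/* On 32-bit ARM the AArch32 view is the only register view, so this is just vcpu_reg() */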
static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
{
	return vcpu_reg(vcpu, reg_num);
}

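/* Accessors for the SPSR banked in the vcpu's current exception mode */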
unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);

static inline unsigned long vpcu_read_spsr(struct kvm_vcpu *vcpu)
{
	return *__vcpu_spsr(vcpu);
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	*__vcpu_spsr(vcpu) = v;
}

static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return *vcpu_reg(vcpu, reg_num);
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	*vcpu_reg(vcpu, reg_num) = val;
}

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);

static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	kvm_inject_undef32(vcpu);
}

static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	kvm_inject_dabt32(vcpu, addr);
}

static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	kvm_inject_pabt32(vcpu, addr);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	return kvm_condition_valid32(vcpu);
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	kvm_skip_instr32(vcpu, is_wide_instr);
}

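/* Hyp Configuration Register (HCR) helpers */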
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr = HCR_GUEST_MASK;
}

static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr;
}

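/* HCR_TWE controls whether guest WFE instructions trap to the hypervisor */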
static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr &= ~HCR_TWE;
}

static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr |= HCR_TWE;
}

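/* A KVM guest on 32-bit ARM always runs in an AArch32 mode */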
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return true;
}

static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_T_BIT;
}

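/* Only the exception modes between USR and SYS have a banked SPSR */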
static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
	return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
}

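/* Any mode other than USR is privileged */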
static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
	return cpsr_mode > USR_MODE;
}

static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.hsr;
}

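/* Return the condition code from the HSR if the CV bit marks it valid, -1 otherwise */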
static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);

	if (hsr & HSR_CV)
		return (hsr & HSR_COND) >> HSR_COND_SHIFT;

	return -1;
}

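/* hxfar holds HDFAR or HIFAR, depending on whether the abort was on data or instruction fetch */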
static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.hxfar;
}

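/* HPFAR holds IPA[39:12] in bits [31:4]; shifting left by 8 recovers the page-aligned IPA */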
static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
}

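/*
 * Data abort syndrome (ISS) decoding: valid syndrome, write vs. read,
 * sign extension, target register, stage-1 page table walk and cache
 * maintenance operation.
 */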
static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
}

static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
}

static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
}

static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
}

static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
}

/* Get the access size, in bytes, from a data abort */
static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
{
	switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
	case 0:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	default:
		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
		return -EFAULT;
	}
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
}

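/* Extract the exception class (EC) from the HSR */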
static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
}

static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
}

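/* Fault status code (FSC) accessors: the full code and the fault type category */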
static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
}

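/*
 * True for synchronous external aborts and parity/ECC errors,
 * including those taken during a translation table walk.
 */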
static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
}

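/* Affinity fields of the guest's MPIDR, read from the shadow cp15 copy */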
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
}

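/* The CPSR E bit selects big-endian data accesses for the guest */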
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_E_BIT;
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
}

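/*
 * Convert MMIO data written by the guest into host byte order,
 * honouring the guest's current data endianness (CPSR.E).
 */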
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		default:
			return be32_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		default:
			return le32_to_cpu(data);
		}
	}
}

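/*
 * Convert host data into the guest's byte order before an MMIO read
 * result is written back to a guest register.
 */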
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		default:
			return cpu_to_be32(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		default:
			return cpu_to_le32(data);
		}
	}
}

#endif /* __ARM_KVM_EMULATE_H__ */