/*
 * (not much of an) Emulation layer for 32bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

/*
 * stolen from arch/arm/kernel/opcodes.c
 *
 * condition code lookup table
 * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
 *
 * bit position in short is condition code: NZCV
 */
static const unsigned short cc_map[16] = {
	0xF0F0,			/* EQ == Z set            */
	0x0F0F,			/* NE                     */
	0xCCCC,			/* CS == C set            */
	0x3333,			/* CC                     */
	0xFF00,			/* MI == N set            */
	0x00FF,			/* PL                     */
	0xAAAA,			/* VS == V set            */
	0x5555,			/* VC                     */
	0x0C0C,			/* HI == C set && Z clear */
	0xF3F3,			/* LS == C clear || Z set */
	0xAA55,			/* GE == (N==V)           */
	0x55AA,			/* LT == (N!=V)           */
	0x0A05,			/* GT == (!Z && (N==V))   */
	0xF5FA,			/* LE == (Z || (N!=V))    */
	0xFFFF,			/* AL always              */
	0			/* NV                     */
};
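
/*
 * Worked example (illustrative): a trapped EQ instruction has
 * cond == 0b0000, so cc_map[0] == 0xF0F0 is consulted.  With guest
 * NZCV == 0b0100 (Z set), bit 4 of 0xF0F0 is 1 and the instruction
 * should have executed; with NZCV == 0b0000, bit 0 is 0 and the
 * instruction is a NOP.
 */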

/*
 * Check if a trapped instruction should have been executed or not.
 */
bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
{
	unsigned long cpsr;
	u32 cpsr_cond;
	int cond;

	/* Top two bits non-zero?  Unconditional. */
	if (kvm_vcpu_get_hsr(vcpu) >> 30)
		return true;

	/* Is condition field valid? */
	cond = kvm_vcpu_get_condition(vcpu);
	if (cond == 0xE)
		return true;

	cpsr = *vcpu_cpsr(vcpu);

	if (cond < 0) {
		/* This can happen in Thumb mode: examine IT state. */
		unsigned long it;

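		/* IT[7:2] lives in CPSR[15:10], IT[1:0] in CPSR[26:25]. */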
		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);

		/* it == 0 => unconditional. */
		if (it == 0)
			return true;

		/* The cond for this insn works out as the top 4 bits. */
		cond = (it >> 4);
	}

	cpsr_cond = cpsr >> 28;

	if (!((cc_map[cond] >> cpsr_cond) & 1))
		return false;

	return true;
}
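
/*
 * Illustrative use (not part of this file): emulation paths typically
 * treat a trapped instruction that fails its condition check as a NOP
 * rather than emulating it, e.g.
 *
 *	if (!kvm_condition_valid32(vcpu)) {
 *		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 *		return 1;
 *	}
 */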

/**
 * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
 * @vcpu:	The VCPU pointer
 *
 * When exceptions occur while instructions are executed in Thumb IF-THEN
 * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
 * to do this little bit of work manually. The fields map like this:
 *
 * IT[7:0] -> CPSR[26:25],CPSR[15:10]
 */
static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
	unsigned long itbits, cond;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_arm = !(cpsr & PSR_AA32_T_BIT);

	if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
		return;

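	/* ITSTATE[7:5] is the base condition, ITSTATE[4:0] the mask bits. */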
	cond = (cpsr & 0xe000) >> 13;
	itbits = (cpsr & 0x1c00) >> (10 - 2);
	itbits |= (cpsr & (0x3 << 25)) >> 25;

	/* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
	if ((itbits & 0x7) == 0)
		itbits = cond = 0;
	else
		itbits = (itbits << 1) & 0x1f;

	cpsr &= ~PSR_AA32_IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	*vcpu_cpsr(vcpu) = cpsr;
}
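
/*
 * Worked example (illustrative): an "ITT EQ" block starts with
 * ITSTATE == 0b00000100 (base condition 000, mask 00100).  One advance
 * shifts the mask to 01000 (the second and last EQ slot); the next
 * advance finds IT[2:0] == 0 and clears ITSTATE, ending the block.
 */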

/**
 * kvm_skip_instr32 - skip a trapped instruction and proceed to the next
 * @vcpu:		The vcpu pointer
 * @is_wide_instr:	true if the trapped instruction is 32 bits wide
 */
void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	bool is_thumb;

	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
	if (is_thumb && !is_wide_instr)
		*vcpu_pc(vcpu) += 2;
	else
		*vcpu_pc(vcpu) += 4;
	kvm_adjust_itstate(vcpu);
}
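
/*
 * Callers should use kvm_skip_instr32() rather than bumping the PC by
 * hand: skipping an instruction inside an IT block without also
 * advancing ITSTATE via kvm_adjust_itstate() would leave the remaining
 * instructions in the block evaluating stale condition slots.
 */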

/*
 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
 *
 * Each entry gives the offset added to the faulting PC to form the
 * banked LR, indexed by exception vector offset / 4; column 0 is for
 * ARM state, column 1 for Thumb state.
 */
static const u8 return_offsets[8][2] = {
	[0] = { 0, 0 },		/* Reset, unused */
	[1] = { 4, 2 },		/* Undefined */
	[2] = { 0, 0 },		/* SVC, unused */
	[3] = { 4, 4 },		/* Prefetch abort */
	[4] = { 8, 8 },		/* Data abort */
	[5] = { 0, 0 },		/* HVC, unused */
	[6] = { 4, 4 },		/* IRQ, unused */
	[7] = { 4, 4 },		/* FIQ, unused */
};

static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long cpsr;
	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
	bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

	cpsr = mode | PSR_AA32_I_BIT;

	if (sctlr & (1 << 30))		/* SCTLR.TE: exceptions taken in Thumb */
		cpsr |= PSR_AA32_T_BIT;
	if (sctlr & (1 << 25))		/* SCTLR.EE: big-endian exception entry */
		cpsr |= PSR_AA32_E_BIT;

	*vcpu_cpsr(vcpu) = cpsr;

	/* Note: These now point to the banked copies */
	vcpu_write_spsr(vcpu, new_spsr_value);
	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

	/* Branch to exception vector */
	if (sctlr & (1 << 13))		/* SCTLR.V: hivecs at 0xffff0000 */
		vect_offset += 0xffff0000;
	else /* always have security exceptions */
		vect_offset += vcpu_cp15(vcpu, c12_VBAR);

	*vcpu_pc(vcpu) = vect_offset;
}

void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
	/* Vector offset 4 is the Undefined Instruction vector */
	prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
}

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException()
 * pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
			 unsigned long addr)
{
	u32 vect_offset;
	u32 *far, *fsr;
	bool is_lpae;

	if (is_pabt) {
		vect_offset = 12;
		far = &vcpu_cp15(vcpu, c6_IFAR);
		fsr = &vcpu_cp15(vcpu, c5_IFSR);
	} else { /* !is_pabt: data abort */
		vect_offset = 16;
		far = &vcpu_cp15(vcpu, c6_DFAR);
		fsr = &vcpu_cp15(vcpu, c5_DFSR);
	}

	prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset);

	*far = addr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);	/* TTBCR.EAE */
	if (is_lpae)
		*fsr = 1 << 9 | 0x34;	/* bit 9: long-descriptor format */
	else
		*fsr = 0x14;		/* short-descriptor FS encoding */
}

void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, false, addr);
}

void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, true, addr);
}
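
/*
 * Illustrative call site (not part of this file): an AArch64 fault
 * injection path handling a 32bit guest might forward a data abort as
 *
 *	if (vcpu_mode_is_32bit(vcpu))
 *		kvm_inject_dabt32(vcpu, kvm_vcpu_get_hfar(vcpu));
 *
 * with the faulting address taken from HxFAR via kvm_vcpu_get_hfar().
 */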