// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

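/*
 * The helpers below check whether the guest has a given facility
 * enabled in its MSR. If not, they queue the corresponding
 * "unavailable" interrupt and return true so the caller can bail out
 * and let the guest's own handler take the fault.
 */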
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	u32 inst;
	enum emulation_result emulated = EMULATE_FAIL;
	int advance = 1;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

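	/* Reset all per-access MMIO emulation state before decoding. */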
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;

	emulated = EMULATE_FAIL;
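	/*
	 * analyse_instr() consults regs->msr while decoding (e.g. for
	 * 32- vs 64-bit effective-address truncation), so mirror the
	 * guest MSR into the pt_regs copy first.
	 */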
	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
	if (analyse_instr(&op, &vcpu->arch.regs, ppc_inst(inst)) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		switch (type) {
		case LOAD: {
			int instr_byte_swap = op.type & BYTEREV;

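			/*
			 * kvmppc_handle_loads() is the sign-extending
			 * variant of kvmppc_handle_load().
			 */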
			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

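			/*
			 * FPCONV loads (e.g. lfs) fetch a single-precision
			 * value that is converted to double precision in
			 * the target FPR.
			 */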
			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

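			/*
			 * Offset of the accessed element within the
			 * naturally aligned 16-byte quadword, in units of
			 * the access size.
			 */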
			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

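			/* A 16-byte lvx is emulated as two 8-byte MMIO loads. */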
			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

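			/*
			 * VSX_CHECK_VEC flags instructions that target the
			 * upper half of the VSX register file (the VMX
			 * registers); those are gated by MSR_VEC rather
			 * than MSR_VSX.
			 */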
			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision convert case: lxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

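			/*
			 * The MMIO path will perform mmio_vsx_copy_nums
			 * accesses of io_size_each bytes each.
			 */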
			emulated = kvmppc_handle_vsx_load(vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/* If byte reversal is needed, op.val has already
			 * been reversed by analyse_instr().
			 */
			emulated = kvmppc_handle_store(vcpu, op.val, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read actual FP vals
			 * from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

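			/*
			 * FPCONV stores (e.g. stfs) convert the 64-bit FPR
			 * value to single precision on the way out.
			 */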
			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(vcpu,
					VCPU_FPR(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);
			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

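			/* A 16-byte stvx is emulated as two 8-byte MMIO stores. */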
			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

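			/*
			 * As in the STORE_FP case, flush the VSX registers
			 * so the store reads current values from vcpu->arch.
			 */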
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precision convert case: stxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}

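	/*
	 * On failure, leave the PC unchanged and send the guest a
	 * program interrupt instead.
	 */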
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}