/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
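
/*
 * Guest-side templates for KVM's PowerPC paravirtualization support.
 * The patching code (arch/powerpc/kernel/kvm.c) copies a template over
 * a privileged instruction in the guest kernel so the common cases can
 * be handled through the shared "magic page" without trapping into the
 * hypervisor.  Three kinds of fixup points are exported via the *_offs
 * words that follow each template:
 *
 *   *_reg      - placeholder instructions whose register field is
 *                rewritten at patch time to match the register used by
 *                the original instruction
 *   *_orig_ins - a slot that receives the original (trapping)
 *                instruction, for the cases we cannot handle locally
 *   *_branch   - the "b ." that is patched into a branch back to the
 *                instruction following the original one
 */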

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/asm-compat.h>

#define KVM_MAGIC_PAGE		(-4096)

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif
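
/*
 * Fields in the shared (magic) page accessed through these macros are
 * 64 bits wide.  On 64-bit kernels we load/store the full doubleword;
 * on 32-bit kernels only the low word matters, which lives at offset
 * +4 in the big-endian layout.
 */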

#define SCRATCH_SAVE							\
	/* Enter the critical section: we are critical as long as	\
	   shared->critical == r1 */					\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);		\
									\
	/* Save state */						\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
	mfcr	r31;							\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE							\
	/* Restore state */						\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);		\
	mtcr	r30;							\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
									\
	/* Leave the critical section: r2 is never equal to r1, so	\
	   storing it makes shared->critical != r1 */			\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
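
/*
 * Critical section protocol: while shared->critical equals the guest's
 * stack pointer (r1), the hypervisor will not inject interrupts, so
 * the multi-instruction templates below behave atomically with respect
 * to interrupt delivery.  Storing r2, which can never equal r1,
 * reopens the window.
 */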

.global kvm_template_start
kvm_template_start:

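/*
 * mtmsrd emulation (64-bit): the patched form, mtmsrd rX, 1, only
 * changes MSR_EE and MSR_RI, so the new bits can be folded into the
 * magic page's MSR copy without trapping.  Only when interrupts become
 * enabled while one is pending do we need the hypervisor; the tlbsync
 * below is a trap-causing placeholder in the _orig_ins fixup slot for
 * that case.
 */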
.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) bits into the MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
	tlbsync

	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

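/*
 * Fixup data for the patcher: the instruction-word offset of each
 * fixup point within its template, plus the total template length in
 * words.  The same pattern repeats after every template below.
 */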
.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4


#define MSR_SAFE_BITS (MSR_EE | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS

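/*
 * mtmsr emulation: unlike the mtmsrd case above, mtmsr may change any
 * MSR bit.  Only MSR_EE and MSR_RI are safe to handle purely in the
 * magic page; if any other bit would change, fall back to the real
 * (trapping) mtmsr so the hypervisor performs the full switch.
 */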
.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR into r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0
	xor	r31, r30, r31

	/* Check if we really need to execute mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0

	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0

	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	bne	do_mtmsr

no_mtmsr:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4

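/*
 * wrtee emulation (BookE): wrtee rX copies rX[EE] into MSR[EE] and
 * changes nothing else, so we can update the magic page's MSR copy in
 * place.  As above, we only fall back to the original (trapping)
 * instruction when EE is being enabled while an interrupt is pending.
 */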
/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

	SCRATCH_SAVE

	/* Fetch old MSR into r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
	ori	r30, r0, 0
	rlwimi	r31, r30, 0, MSR_EE

	/*
	 * If MSR[EE] is now set, check for a pending interrupt.
	 * We could skip this if MSR[EE] was already on, but that
	 * should be rare, so don't bother.
	 */
	andi.	r30, r30, MSR_EE

	/* Put MSR into magic page because we don't call wrtee */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	beq	no_wrtee

	/* Check if we have to fetch an interrupt */
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r30, 0
	bne	do_wrtee

no_wrtee:
	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrtee_branch:
	b	.

do_wrtee:
	SCRATCH_RESTORE

	/* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
	wrtee	r0

	b	kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
	.long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
	.long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
	.long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
	.long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4

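/*
 * wrteei 0 emulation: clearing MSR_EE can never make a pending
 * interrupt deliverable, so no interrupt check is needed; just clear
 * the bit in the magic page's MSR copy and return.
 */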
.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
	SCRATCH_SAVE

	/* Fetch old MSR into r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	rlwinm	r31, r31, 0, ~MSR_EE

	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_0_branch:
	b	.
kvm_emulate_wrteei_0_end:

.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
	.long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
	.long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4

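/*
 * mtsrin emulation (Book3S): while the guest runs with translation off
 * (MSR_IR and MSR_DR clear), a segment register update has no
 * immediate effect, so recording the new value in the magic page's SR
 * array is enough.  With translation on, we execute the original
 * mtsrin (patched in over the nop below) and trap to the hypervisor.
 */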
.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	/* If translation is enabled, execute the real mtsrin instead */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1

	SCRATCH_RESTORE

	/* Placeholder; the patcher writes the original mtsrin here */
kvm_emulate_mtsrin_orig_ins:
	nop
	b	kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
	/* rX >> 26: the SR number * 4, a byte offset into the SR array */
	rlwinm  r30,r0,6,26,29

kvm_emulate_mtsrin_reg2:
	/* Record the new value in the magic page's SR array */
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4

.global kvm_template_end
kvm_template_end: