/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>

#include <kvm/arm_psci.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

#define AFFINITY_MASK(level)	~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

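/*
 * Helpers for the SMCCC calling convention: the function ID is passed
 * in r0/x0, up to three arguments in r1-r3/x1-x3, and up to four
 * results are returned in r0-r3/x0-x3.
 */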
static u32 smccc_get_function(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 0);
}

static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 1);
}

static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 2);
}

static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 3);
}

static void smccc_set_retval(struct kvm_vcpu *vcpu,
			     unsigned long a0,
			     unsigned long a1,
			     unsigned long a2,
			     unsigned long a3)
{
	vcpu_set_reg(vcpu, 0, a0);
	vcpu_set_reg(vcpu, 1, a1);
	vcpu_set_reg(vcpu, 2, a2);
	vcpu_set_reg(vcpu, 3, a3);
}

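/*
 * Build the MPIDR mask covering the affinity fields at @affinity_level
 * and above. Returns 0 for an invalid (out-of-range) affinity level.
 */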
static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
	if (affinity_level <= 3)
		return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

	return 0;
}

static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	/*
	 * NOTE: For simplicity, we emulate VCPU suspend the same way
	 * as WFI (Wait-for-interrupt) emulation.
	 *
	 * This means that for KVM the wakeup events are interrupts,
	 * which is consistent with the intended use of StateID as
	 * described in section 5.4.1 of the PSCI v0.2 specification
	 * (ARM DEN 0022A).
	 *
	 * Further, we also treat a power-down request the same as a
	 * stand-by request, as per section 5.4.2 clause 3 of the PSCI
	 * v0.2 specification (ARM DEN 0022A). This means all suspend
	 * states for KVM will preserve the register state.
	 */
	kvm_vcpu_block(vcpu);
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);

	return PSCI_RET_SUCCESS;
}

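/* Mark the VCPU as powered off and ask it to sleep via KVM_REQ_SLEEP. */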
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

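/*
 * Handle CPU_ON: look up the target VCPU from the MPIDR given in arg1,
 * reset it, install the entry point and context ID, and wake it up.
 * Callers hold kvm->lock to serialise concurrent CPU_ON requests.
 */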
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL;
	struct swait_queue_head *wq;
	unsigned long cpu_id;
	unsigned long context_id;
	phys_addr_t target_pc;

	cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
	if (vcpu_mode_is_32bit(source_vcpu))
		cpu_id &= ~((u32) 0);

	vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

	/*
	 * Make sure the caller requested a valid CPU and that the CPU is
	 * turned off.
	 */
	if (!vcpu)
		return PSCI_RET_INVALID_PARAMS;
	if (!vcpu->arch.power_off) {
		if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
			return PSCI_RET_ALREADY_ON;
		else
			return PSCI_RET_INVALID_PARAMS;
	}

	target_pc = smccc_get_arg2(source_vcpu);
	context_id = smccc_get_arg3(source_vcpu);

	kvm_reset_vcpu(vcpu);

	/* Gracefully handle Thumb2 entry point */
	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
		target_pc &= ~((phys_addr_t) 1);
		vcpu_set_thumb(vcpu);
	}

	/* Propagate caller endianness */
	if (kvm_vcpu_is_be(source_vcpu))
		kvm_vcpu_set_be(vcpu);

	*vcpu_pc(vcpu) = target_pc;
	/*
	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
	 * the general purpose registers are undefined upon CPU_ON.
	 */
	smccc_set_retval(vcpu, context_id, 0, 0, 0);
	vcpu->arch.power_off = false;
	smp_mb();		/* Make sure the above is visible */

	wq = kvm_arch_vcpu_wq(vcpu);
	swake_up_one(wq);

	return PSCI_RET_SUCCESS;
}

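/*
 * Handle AFFINITY_INFO: return ON if at least one VCPU matching the
 * target affinity is running, OFF if all matching VCPUs are powered
 * off, and INVALID_PARAMS if no VCPU matches at all.
 */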
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
	int i, matching_cpus = 0;
	unsigned long mpidr;
	unsigned long target_affinity;
	unsigned long target_affinity_mask;
	unsigned long lowest_affinity_level;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	target_affinity = smccc_get_arg1(vcpu);
	lowest_affinity_level = smccc_get_arg2(vcpu);

	/* Determine target affinity mask */
	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
	if (!target_affinity_mask)
		return PSCI_RET_INVALID_PARAMS;

	/* Ignore other bits of target affinity */
	target_affinity &= target_affinity_mask;

	/*
	 * If one or more VCPUs matching the target affinity are
	 * running, return ON; otherwise return OFF.
	 */
	kvm_for_each_vcpu(i, tmp, kvm) {
		mpidr = kvm_vcpu_get_mpidr_aff(tmp);
		if ((mpidr & target_affinity_mask) == target_affinity) {
			matching_cpus++;
			if (!tmp->arch.power_off)
				return PSCI_0_2_AFFINITY_LEVEL_ON;
		}
	}

	if (!matching_cpus)
		return PSCI_RET_INVALID_PARAMS;

	return PSCI_0_2_AFFINITY_LEVEL_OFF;
}

static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
	int i;
	struct kvm_vcpu *tmp;

	/*
	 * The KVM ABI specifies that a system event exit may call KVM_RUN
	 * again and may perform shutdown/reboot at a later time than when
	 * the actual request is made.  Since we are implementing PSCI and a
	 * caller of PSCI reboot and shutdown expects that the system shuts
	 * down or reboots immediately, let's make sure that VCPUs are not
	 * run after this call is handled and before the VCPUs have been
	 * re-initialized.
	 */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
	vcpu->run->system_event.type = type;
	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}

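/*
 * Handle a PSCI v0.2 call. Returns 1 to resume the guest, or 0 when
 * the call requires an exit to user space (SYSTEM_OFF/SYSTEM_RESET).
 */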
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;
	int ret = 1;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		/*
		 * Bits[31:16] = Major Version = 0
		 * Bits[15:0] = Minor Version = 2
		 */
		val = KVM_ARM_PSCI_0_2;
		break;
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_SUSPEND:
		val = kvm_psci_vcpu_suspend(vcpu);
		break;
	case PSCI_0_2_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case PSCI_0_2_FN_CPU_ON:
	case PSCI_0_2_FN64_CPU_ON:
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	case PSCI_0_2_FN_AFFINITY_INFO:
	case PSCI_0_2_FN64_AFFINITY_INFO:
		val = kvm_psci_vcpu_affinity_info(vcpu);
		break;
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		/*
		 * Trusted OS is MP, hence does not require migration,
		 * or Trusted OS is not present.
		 */
		val = PSCI_0_2_TOS_MP;
		break;
	case PSCI_0_2_FN_SYSTEM_OFF:
		kvm_psci_system_off(vcpu);
		/*
		 * We shouldn't be going back to the guest VCPU after
		 * receiving a SYSTEM_OFF request.
		 *
		 * If user space accidentally/deliberately resumes the
		 * guest VCPU after a SYSTEM_OFF request, the guest
		 * VCPU should see an internal failure from the PSCI
		 * return value. To achieve this, we preload r0 (or x0)
		 * with the PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	case PSCI_0_2_FN_SYSTEM_RESET:
		kvm_psci_system_reset(vcpu);
		/*
		 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
		 * with the PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
}

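/*
 * Handle a PSCI v1.0 call: PSCI_FEATURES is implemented here, and any
 * function not handled falls back to the v0.2 handler.
 */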
static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
{
	u32 psci_fn = smccc_get_function(vcpu);
	u32 feature;
	unsigned long val;
	int ret = 1;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		val = KVM_ARM_PSCI_1_0;
		break;
	case PSCI_1_0_FN_PSCI_FEATURES:
		feature = smccc_get_arg1(vcpu);
		switch (feature) {
		case PSCI_0_2_FN_PSCI_VERSION:
		case PSCI_0_2_FN_CPU_SUSPEND:
		case PSCI_0_2_FN64_CPU_SUSPEND:
		case PSCI_0_2_FN_CPU_OFF:
		case PSCI_0_2_FN_CPU_ON:
		case PSCI_0_2_FN64_CPU_ON:
		case PSCI_0_2_FN_AFFINITY_INFO:
		case PSCI_0_2_FN64_AFFINITY_INFO:
		case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		case PSCI_0_2_FN_SYSTEM_OFF:
		case PSCI_0_2_FN_SYSTEM_RESET:
		case PSCI_1_0_FN_PSCI_FEATURES:
		case ARM_SMCCC_VERSION_FUNC_ID:
			val = 0;
			break;
		default:
			val = PSCI_RET_NOT_SUPPORTED;
			break;
		}
		break;
	default:
		return kvm_psci_0_2_call(vcpu);
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
}

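/*
 * Handle the legacy, KVM-specific PSCI v0.1 interface, which only
 * implements CPU_OFF and CPU_ON.
 */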
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;

	switch (psci_fn) {
	case KVM_PSCI_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case KVM_PSCI_FN_CPU_ON:
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return 1;
}

/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 * -EINVAL: Unrecognized PSCI function
 */
static int kvm_psci_call(struct kvm_vcpu *vcpu)
{
	switch (kvm_psci_version(vcpu, vcpu->kvm)) {
	case KVM_ARM_PSCI_1_0:
		return kvm_psci_1_0_call(vcpu);
	case KVM_ARM_PSCI_0_2:
		return kvm_psci_0_2_call(vcpu);
	case KVM_ARM_PSCI_0_1:
		return kvm_psci_0_1_call(vcpu);
	default:
		return -EINVAL;
	}
}

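/*
 * Entry point for guest HVC calls: handle the SMCCC version and
 * architectural workaround queries (branch predictor hardening and
 * SSBD) here, and hand everything else to the PSCI code.
 */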
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
	u32 func_id = smccc_get_function(vcpu);
	u32 val = SMCCC_RET_NOT_SUPPORTED;
	u32 feature;

	switch (func_id) {
	case ARM_SMCCC_VERSION_FUNC_ID:
		val = ARM_SMCCC_VERSION_1_1;
		break;
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		feature = smccc_get_arg1(vcpu);
		switch (feature) {
		case ARM_SMCCC_ARCH_WORKAROUND_1:
			if (kvm_arm_harden_branch_predictor())
				val = SMCCC_RET_SUCCESS;
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_2:
			switch (kvm_arm_have_ssbd()) {
			case KVM_SSBD_FORCE_DISABLE:
			case KVM_SSBD_UNKNOWN:
				break;
			case KVM_SSBD_KERNEL:
				val = SMCCC_RET_SUCCESS;
				break;
			case KVM_SSBD_FORCE_ENABLE:
			case KVM_SSBD_MITIGATED:
				val = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		}
		break;
	default:
		return kvm_psci_call(vcpu);
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return 1;
}

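/*
 * User space can save/restore the (pseudo) firmware registers with the
 * KVM_{GET,SET}_ONE_REG ioctls; only the PSCI version is exposed here.
 */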
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
	return 1;		/* PSCI version */
}

int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
		return -EFAULT;

	return 0;
}

int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
		void __user *uaddr = (void __user *)(long)reg->addr;
		u64 val;

		val = kvm_psci_version(vcpu, vcpu->kvm);
		if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
			return -EFAULT;

		return 0;
	}

	return -EINVAL;
}

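/*
 * Setting the PSCI version must be consistent with how the VCPU was
 * created: v0.1 only without the KVM_ARM_VCPU_PSCI_0_2 feature bit,
 * v0.2/v1.0 only with it.
 */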
int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
		void __user *uaddr = (void __user *)(long)reg->addr;
		bool wants_02;
		u64 val;

		if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
			return -EFAULT;

		wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);

		switch (val) {
		case KVM_ARM_PSCI_0_1:
			if (wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		case KVM_ARM_PSCI_0_2:
		case KVM_ARM_PSCI_1_0:
			if (!wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		}
	}

	return -EINVAL;
}