// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <acpi/ghes.h>
#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/arm-smccc.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/cpuhotplug.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

/*
 * The call to use to reach the firmware.
 */
static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
			unsigned long arg0, unsigned long arg1,
			unsigned long arg2, unsigned long arg3,
			unsigned long arg4, struct arm_smccc_res *res);

/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;

struct sdei_event {
	/* These three are protected by the sdei_list_lock */
	struct list_head	list;
	bool			reregister;
	bool			reenable;

	u32			event_num;
	u8			type;
	u8			priority;

	/* This pointer is handed to firmware as the event argument. */
	union {
		/* Shared events */
		struct sdei_registered_event *registered;

		/* CPU private events */
		struct sdei_registered_event __percpu *private_registered;
	};
};

/* Take this mutex for any API call or modification. Take it before sdei_list_lock. */
static DEFINE_MUTEX(sdei_events_lock);

/* and then hold this when modifying the list */
static DEFINE_SPINLOCK(sdei_list_lock);
static LIST_HEAD(sdei_list);

/* Private events are registered/enabled via IPI passing one of these */
struct sdei_crosscall_args {
	struct sdei_event *event;
	atomic_t errors;
	int first_error;
};

#define CROSSCALL_INIT(arg, event)		\
	do {					\
		arg.event = event;		\
		arg.first_error = 0;		\
		atomic_set(&arg.errors, 0);	\
	} while (0)

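/*
 * Run @fn for @event on this CPU only, or on every CPU via IPI. Errors are
 * counted in the sdei_crosscall_args; only the first error is reported back
 * to the caller.
 */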
static inline int sdei_do_local_call(smp_call_func_t fn,
				     struct sdei_event *event)
{
	struct sdei_crosscall_args arg;

	CROSSCALL_INIT(arg, event);
	fn(&arg);

	return arg.first_error;
}

static inline int sdei_do_cross_call(smp_call_func_t fn,
				     struct sdei_event *event)
{
	struct sdei_crosscall_args arg;

	CROSSCALL_INIT(arg, event);
	on_each_cpu(fn, &arg, true);

	return arg.first_error;
}

static inline void
sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
{
	if (err && (atomic_inc_return(&arg->errors) == 1))
		arg->first_error = err;
}

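/*
 * Map the error codes defined by the SDEI specification onto Linux errno
 * values. Anything else (including success) becomes 0.
 */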
static int sdei_to_linux_errno(unsigned long sdei_err)
{
	switch (sdei_err) {
	case SDEI_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case SDEI_INVALID_PARAMETERS:
		return -EINVAL;
	case SDEI_DENIED:
		return -EPERM;
	case SDEI_PENDING:
		return -EINPROGRESS;
	case SDEI_OUT_OF_RESOURCE:
		return -ENOMEM;
	}

	return 0;
}

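/*
 * Issue an SDEI call to firmware through the registered conduit. The
 * firmware's return value is passed back through @result when the caller
 * provides one, and is also converted to an errno for the return code.
 */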
static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
			  unsigned long arg1, unsigned long arg2,
			  unsigned long arg3, unsigned long arg4,
			  u64 *result)
{
	int err;
	struct arm_smccc_res res;

	if (sdei_firmware_call) {
		sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
				   &res);
		err = sdei_to_linux_errno(res.a0);
	} else {
		/*
		 * !sdei_firmware_call means we failed to probe or called
		 * sdei_mark_interface_broken(). -EIO is not an error returned
		 * by sdei_to_linux_errno() and is used to suppress messages
		 * from this driver.
		 */
		err = -EIO;
		res.a0 = SDEI_NOT_SUPPORTED;
	}

	if (result)
		*result = res.a0;

	return err;
}
NOKPROBE_SYMBOL(invoke_sdei_fn);

static struct sdei_event *sdei_event_find(u32 event_num)
{
	struct sdei_event *e, *found = NULL;

	lockdep_assert_held(&sdei_events_lock);

	spin_lock(&sdei_list_lock);
	list_for_each_entry(e, &sdei_list, list) {
		if (e->event_num == event_num) {
			found = e;
			break;
		}
	}
	spin_unlock(&sdei_list_lock);

	return found;
}

int sdei_api_event_context(u32 query, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
			      result);
}
NOKPROBE_SYMBOL(sdei_api_event_context);

static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
			      0, 0, result);
}

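/*
 * Allocate and describe a new event. The event's priority and type are
 * queried from firmware; shared events get a single registered-event
 * structure, private events get one per possible CPU.
 */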
static struct sdei_event *sdei_event_create(u32 event_num,
					    sdei_event_callback *cb,
					    void *cb_arg)
{
	int err;
	u64 result;
	struct sdei_event *event;
	struct sdei_registered_event *reg;

	lockdep_assert_held(&sdei_events_lock);

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event) {
		err = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&event->list);
	event->event_num = event_num;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err)
		goto fail;
	event->priority = result;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
				      &result);
	if (err)
		goto fail;
	event->type = result;

	if (event->type == SDEI_EVENT_TYPE_SHARED) {
		reg = kzalloc(sizeof(*reg), GFP_KERNEL);
		if (!reg) {
			err = -ENOMEM;
			goto fail;
		}

		reg->event_num = event->event_num;
		reg->priority = event->priority;

		reg->callback = cb;
		reg->callback_arg = cb_arg;
		event->registered = reg;
	} else {
		int cpu;
		struct sdei_registered_event __percpu *regs;

		regs = alloc_percpu(struct sdei_registered_event);
		if (!regs) {
			err = -ENOMEM;
			goto fail;
		}

		for_each_possible_cpu(cpu) {
			reg = per_cpu_ptr(regs, cpu);

			reg->event_num = event->event_num;
			reg->priority = event->priority;
			reg->callback = cb;
			reg->callback_arg = cb_arg;
		}

		event->private_registered = regs;
	}

	spin_lock(&sdei_list_lock);
	list_add(&event->list, &sdei_list);
	spin_unlock(&sdei_list_lock);

	return event;

fail:
	kfree(event);
	return ERR_PTR(err);
}

static void sdei_event_destroy_llocked(struct sdei_event *event)
{
	lockdep_assert_held(&sdei_events_lock);
	lockdep_assert_held(&sdei_list_lock);

	list_del(&event->list);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		kfree(event->registered);
	else
		free_percpu(event->private_registered);

	kfree(event);
}

static void sdei_event_destroy(struct sdei_event *event)
{
	spin_lock(&sdei_list_lock);
	sdei_event_destroy_llocked(event);
	spin_unlock(&sdei_list_lock);
}

static int sdei_api_get_version(u64 *version)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
}

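/*
 * Masking a PE stops firmware delivering SDEI events to it. These calls must
 * run on the CPU they affect, hence the preemptible() checks and the IPI
 * wrappers below.
 */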
int sdei_mask_local_cpu(void)
{
	int err;

	WARN_ON_ONCE(preemptible());

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to mask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_mask_cpu(void *ignored)
{
	sdei_mask_local_cpu();
}

int sdei_unmask_local_cpu(void)
{
	int err;

	WARN_ON_ONCE(preemptible());

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to unmask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_unmask_cpu(void *ignored)
{
	sdei_unmask_local_cpu();
}

static void _ipi_private_reset(void *ignored)
{
	int err;

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
			     NULL);
	if (err && err != -EIO)
		pr_warn_once("failed to reset CPU[%u]: %d\n",
			     smp_processor_id(), err);
}

static int sdei_api_shared_reset(void)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
			      NULL);
}

static void sdei_mark_interface_broken(void)
{
	pr_err("disabling SDEI firmware interface\n");
	on_each_cpu(&_ipi_mask_cpu, NULL, true);
	sdei_firmware_call = NULL;
}

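/*
 * Reset the private (per-CPU) state on each CPU, then the shared state. If
 * the shared reset fails the interface is in an unknown state, so mark it
 * broken.
 */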
static int sdei_platform_reset(void)
{
	int err;

	on_each_cpu(&_ipi_private_reset, NULL, true);
	err = sdei_api_shared_reset();
	if (err) {
		pr_err("Failed to reset platform: %d\n", err);
		sdei_mark_interface_broken();
	}

	return err;
}

static int sdei_api_event_enable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
			      0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_enable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	WARN_ON_ONCE(preemptible());

	err = sdei_api_event_enable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

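/*
 * Enable an event so that firmware begins delivering it. Private events must
 * be enabled on each CPU, so use the cross call. On success, set reenable so
 * the event is enabled again after CPU hotplug or hibernate.
 */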
int sdei_event_enable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	cpus_read_lock();
	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_enable(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_enable, event);

	if (!err) {
		spin_lock(&sdei_list_lock);
		event->reenable = true;
		spin_unlock(&sdei_list_lock);
	}
	cpus_read_unlock();
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_disable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
			      0, 0, NULL);
}

static void _ipi_event_disable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_disable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_disable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	spin_lock(&sdei_list_lock);
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_disable(event->event_num);
	else
		err = sdei_do_cross_call(_ipi_event_disable, event);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_unregister(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
			      0, 0, 0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_unregister(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	WARN_ON_ONCE(preemptible());

	err = sdei_api_event_unregister(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

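/*
 * Unregister an event and free its description. The firmware call fails with
 * -EINPROGRESS if the event handler is still running on another CPU; see
 * sdei_unregister_ghes() for a caller that retries in that case.
 */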
int sdei_event_unregister(u32 event_num)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		pr_warn("Event %u not registered\n", event_num);
		err = -ENOENT;
		goto unlock;
	}

	spin_lock(&sdei_list_lock);
	event->reregister = false;
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_unregister(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_unregister, event);

	if (err)
		goto unlock;

	sdei_event_destroy(event);
unlock:
	mutex_unlock(&sdei_events_lock);

	return err;
}

/*
 * unregister events, but don't destroy them as they are re-registered by
 * sdei_reregister_shared().
 */
static int sdei_unregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		err = sdei_api_event_unregister(event->event_num);
		if (err)
			break;
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
				   void *arg, u64 flags, u64 affinity)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
			      (unsigned long)entry_point, (unsigned long)arg,
			      flags, affinity, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_register(void *data)
{
	int err;
	struct sdei_registered_event *reg;
	struct sdei_crosscall_args *arg = data;

	WARN_ON(preemptible());

	reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
	err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
				      reg, 0, 0);

	sdei_cross_call_return(arg, err);
}

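/*
 * Register @cb for @event_num. Shared events are registered with the RM_ANY
 * routing mode, private events are registered on every CPU. The event is
 * left disabled; a typical caller (see sdei_register_ghes()) follows up with
 * sdei_event_enable(), e.g.:
 *
 *	err = sdei_event_register(event_num, my_cb, my_arg);
 *	if (!err)
 *		err = sdei_event_enable(event_num);
 *
 * (my_cb/my_arg are illustrative names for the caller's handler and cookie.)
 */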
int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	if (sdei_event_find(event_num)) {
		pr_warn("Event %u already registered\n", event_num);
		err = -EBUSY;
		goto unlock;
	}

	event = sdei_event_create(event_num, cb, arg);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		pr_warn("Failed to create event %u: %d\n", event_num, err);
		goto unlock;
	}

	cpus_read_lock();
	if (event->type == SDEI_EVENT_TYPE_SHARED) {
		err = sdei_api_event_register(event->event_num,
					      sdei_entry_point,
					      event->registered,
					      SDEI_EVENT_REGISTER_RM_ANY, 0);
	} else {
		err = sdei_do_cross_call(_local_event_register, event);
		if (err)
			sdei_do_cross_call(_local_event_unregister, event);
	}

	if (err) {
		sdei_event_destroy(event);
		pr_warn("Failed to register event %u: %d\n", event_num, err);
		goto cpu_unlock;
	}

	spin_lock(&sdei_list_lock);
	event->reregister = true;
	spin_unlock(&sdei_list_lock);
cpu_unlock:
	cpus_read_unlock();
unlock:
	mutex_unlock(&sdei_events_lock);
	return err;
}

static int sdei_reregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			err = sdei_api_event_register(event->event_num,
					sdei_entry_point, event->registered,
					SDEI_EVENT_REGISTER_RM_ANY, 0);
			if (err) {
				pr_err("Failed to re-register event %u\n",
				       event->event_num);
				sdei_event_destroy_llocked(event);
				break;
			}
		}

		if (event->reenable) {
			err = sdei_api_event_enable(event->event_num);
			if (err) {
				pr_err("Failed to re-enable event %u\n",
				       event->event_num);
				break;
			}
		}
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

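/*
 * CPU hotplug callbacks: unregister this CPU's private events and mask it
 * before it goes offline; re-register/re-enable them (based on the
 * reregister and reenable flags) and unmask it when it comes back up.
 */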
static int sdei_cpuhp_down(unsigned int cpu)
{
	struct sdei_event *event;
	int err;

	/* un-register private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		err = sdei_do_local_call(_local_event_unregister, event);
		if (err) {
			pr_err("Failed to unregister event %u: %d\n",
			       event->event_num, err);
		}
	}
	spin_unlock(&sdei_list_lock);

	return sdei_mask_local_cpu();
}

static int sdei_cpuhp_up(unsigned int cpu)
{
	struct sdei_event *event;
	int err;

	/* re-register/enable private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			err = sdei_do_local_call(_local_event_register, event);
			if (err) {
				pr_err("Failed to re-register event %u: %d\n",
				       event->event_num, err);
			}
		}

		if (event->reenable) {
			err = sdei_do_local_call(_local_event_enable, event);
			if (err) {
				pr_err("Failed to re-enable event %u: %d\n",
				       event->event_num, err);
			}
		}
	}
	spin_unlock(&sdei_list_lock);

	return sdei_unmask_local_cpu();
}

/* When entering idle, mask/unmask events for this cpu */
static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	int rv;

	switch (action) {
	case CPU_PM_ENTER:
		rv = sdei_mask_local_cpu();
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		rv = sdei_unmask_local_cpu();
		break;
	default:
		return NOTIFY_DONE;
	}

	if (rv)
		return notifier_from_errno(rv);

	return NOTIFY_OK;
}

static struct notifier_block sdei_pm_nb = {
	.notifier_call = sdei_pm_notifier,
};

static int sdei_device_suspend(struct device *dev)
{
	on_each_cpu(_ipi_mask_cpu, NULL, true);

	return 0;
}

static int sdei_device_resume(struct device *dev)
{
	on_each_cpu(_ipi_unmask_cpu, NULL, true);

	return 0;
}

/*
 * We need all events to be reregistered when we resume from hibernate.
 *
 * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
 * events during freeze, then re-register and re-enable them during thaw
 * and restore.
 */
static int sdei_device_freeze(struct device *dev)
{
	int err;

	/* unregister private events */
	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);

	err = sdei_unregister_shared();
	if (err)
		return err;

	return 0;
}

static int sdei_device_thaw(struct device *dev)
{
	int err;

	/* re-register shared events */
	err = sdei_reregister_shared();
	if (err) {
		pr_warn("Failed to re-register shared events...\n");
		sdei_mark_interface_broken();
		return err;
	}

	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err)
		pr_warn("Failed to re-register CPU hotplug notifier...\n");

	return err;
}

static int sdei_device_restore(struct device *dev)
{
	int err;

	err = sdei_platform_reset();
	if (err)
		return err;

	return sdei_device_thaw(dev);
}

static const struct dev_pm_ops sdei_pm_ops = {
	.suspend = sdei_device_suspend,
	.resume = sdei_device_resume,
	.freeze = sdei_device_freeze,
	.thaw = sdei_device_thaw,
	.restore = sdei_device_restore,
};

/*
 * Mask all CPUs and unregister all events on panic, reboot or kexec.
 */
static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	/*
	 * We are about to reset the interface; after this there is no point
	 * doing work when we take CPUs offline.
	 */
	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);

	sdei_platform_reset();

	return NOTIFY_OK;
}

static struct notifier_block sdei_reboot_nb = {
	.notifier_call = sdei_reboot_notifier,
};

static void sdei_smccc_smc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_smc);

static void sdei_smccc_hvc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_hvc);

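/*
 * Used by APEI GHES: take the event number from the ACPI Hardware Error
 * Notification structure, pick the normal or critical callback based on the
 * event's priority, then register and enable the event.
 */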
int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
		       sdei_event_callback *critical_cb)
{
	int err;
	u64 result;
	u32 event_num;
	sdei_event_callback *cb;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	event_num = ghes->generic->notify.vector;
	if (event_num == 0) {
		/*
		 * Event 0 is reserved by the specification for
		 * SDEI_EVENT_SIGNAL.
		 */
		return -EINVAL;
	}

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err)
		return err;

	if (result == SDEI_EVENT_PRIORITY_CRITICAL)
		cb = critical_cb;
	else
		cb = normal_cb;

	err = sdei_event_register(event_num, cb, ghes);
	if (!err)
		err = sdei_event_enable(event_num);

	return err;
}

int sdei_unregister_ghes(struct ghes *ghes)
{
	int i;
	int err;
	u32 event_num = ghes->generic->notify.vector;

	might_sleep();

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	/*
	 * The event may be running on another CPU. Disable it
	 * to stop new events, then try to unregister a few times.
	 */
	err = sdei_event_disable(event_num);
	if (err)
		return err;

	for (i = 0; i < 3; i++) {
		err = sdei_event_unregister(event_num);
		if (err != -EINPROGRESS)
			break;

		schedule();
	}

	return err;
}

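/*
 * Discover whether firmware expects SDEI calls to be made with SMC or HVC:
 * from the "method" property when booted with DT, or from the PSCI conduit
 * when booted with ACPI.
 */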
static int sdei_get_conduit(struct platform_device *pdev)
{
	const char *method;
	struct device_node *np = pdev->dev.of_node;

	sdei_firmware_call = NULL;
	if (np) {
		if (of_property_read_string(np, "method", &method)) {
			pr_warn("missing \"method\" property\n");
			return SMCCC_CONDUIT_NONE;
		}

		if (!strcmp("hvc", method)) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return SMCCC_CONDUIT_HVC;
		} else if (!strcmp("smc", method)) {
			sdei_firmware_call = &sdei_smccc_smc;
			return SMCCC_CONDUIT_SMC;
		}

		pr_warn("invalid \"method\" property: %s\n", method);
	} else if (!acpi_disabled) {
		if (acpi_psci_use_hvc()) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return SMCCC_CONDUIT_HVC;
		} else {
			sdei_firmware_call = &sdei_smccc_smc;
			return SMCCC_CONDUIT_SMC;
		}
	}

	return SMCCC_CONDUIT_NONE;
}

static int sdei_probe(struct platform_device *pdev)
{
	int err;
	u64 ver = 0;
	int conduit;

	conduit = sdei_get_conduit(pdev);
	if (!sdei_firmware_call)
		return 0;

	err = sdei_api_get_version(&ver);
	if (err) {
		pr_err("Failed to get SDEI version: %d\n", err);
		sdei_mark_interface_broken();
		return err;
	}

	pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
		(int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
		(int)SDEI_VERSION_VENDOR(ver));

	if (SDEI_VERSION_MAJOR(ver) != 1) {
		pr_warn("Conflicting SDEI version detected.\n");
		sdei_mark_interface_broken();
		return -EINVAL;
	}

	err = sdei_platform_reset();
	if (err)
		return err;

	sdei_entry_point = sdei_arch_get_entry_point(conduit);
	if (!sdei_entry_point) {
		/* Not supported due to hardware or boot configuration */
		sdei_mark_interface_broken();
		return 0;
	}

	err = cpu_pm_register_notifier(&sdei_pm_nb);
	if (err) {
		pr_warn("Failed to register CPU PM notifier...\n");
		goto error;
	}

	err = register_reboot_notifier(&sdei_reboot_nb);
	if (err) {
		pr_warn("Failed to register reboot notifier...\n");
		goto remove_cpupm;
	}

	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err) {
		pr_warn("Failed to register CPU hotplug notifier...\n");
		goto remove_reboot;
	}

	return 0;

remove_reboot:
	unregister_reboot_notifier(&sdei_reboot_nb);

remove_cpupm:
	cpu_pm_unregister_notifier(&sdei_pm_nb);

error:
	sdei_mark_interface_broken();
	return err;
}

static const struct of_device_id sdei_of_match[] = {
	{ .compatible = "arm,sdei-1.0" },
	{}
};

static struct platform_driver sdei_driver = {
	.driver		= {
		.name		= "sdei",
		.pm		= &sdei_pm_ops,
		.of_match_table	= sdei_of_match,
	},
	.probe		= sdei_probe,
};

static bool __init sdei_present_acpi(void)
{
	acpi_status status;
	struct acpi_table_header *sdei_table_header;

	if (acpi_disabled)
		return false;

	status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		const char *msg = acpi_format_exception(status);

		pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
	}
	if (ACPI_FAILURE(status))
		return false;

	acpi_put_table(sdei_table_header);

	return true;
}

static int __init sdei_init(void)
{
	struct platform_device *pdev;
	int ret;

	ret = platform_driver_register(&sdei_driver);
	if (ret || !sdei_present_acpi())
		return ret;

	pdev = platform_device_register_simple(sdei_driver.driver.name,
					       0, NULL, 0);
	if (IS_ERR(pdev)) {
		ret = PTR_ERR(pdev);
		platform_driver_unregister(&sdei_driver);
		pr_info("Failed to register ACPI:SDEI platform device %d\n",
			ret);
	}

	return ret;
}

/*
 * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register
 * its events. ACPI is initialised from a subsys_initcall(), GHES is initialised
 * by device_initcall(). We want to be called in the middle.
 */
subsys_initcall_sync(sdei_init);

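/*
 * Called from the arch entry code when firmware delivers an event. @arg is
 * the sdei_registered_event pointer that was handed to firmware as the event
 * argument at registration time.
 */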
int sdei_event_handler(struct pt_regs *regs,
		       struct sdei_registered_event *arg)
{
	int err;
	mm_segment_t orig_addr_limit;
	u32 event_num = arg->event_num;

	/*
	 * Save and restore 'fs'.
	 * The architecture's entry code saves/restores 'fs' when taking an
	 * exception from the kernel. This ensures addr_limit isn't inherited
	 * if we interrupted something that allowed the uaccess routines to
	 * access kernel memory.
	 * Do the same here because we don't arrive via that entry code.
	 */
	orig_addr_limit = force_uaccess_begin();

	err = arg->callback(event_num, regs, arg->callback_arg);
	if (err)
		pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
				   event_num, smp_processor_id(), err);

	force_uaccess_end(orig_addr_limit);

	return err;
}
NOKPROBE_SYMBOL(sdei_event_handler);