/*
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <ipi.h>
#include <ksched.h>

#include <zephyr/kernel.h>

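/*
 * IPIs are delivered through the CLINT's MSIP registers, so the
 * devicetree must contain a node labeled 'clint'. For illustration
 * only (addresses are SoC-specific; these values match QEMU's virt
 * machine), such a node typically looks like:
 *
 *     clint: clint@2000000 {
 *             compatible = "sifive,clint0";
 *             reg = <0x02000000 0x10000>;
 *     };
 */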
#define CLINT_NODE DT_NODELABEL(clint)
#if !DT_NODE_EXISTS(CLINT_NODE)
#error "Label 'clint' is not defined in the devicetree."
#endif
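/*
 * The CLINT exposes one 32-bit MSIP register per hart, laid out
 * contiguously from the base of its register block. Writing 1 pends a
 * machine software interrupt on the corresponding hart; writing 0
 * clears it.
 */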
#define MSIP_BASE DT_REG_ADDR_RAW(CLINT_NODE)
#define MSIP(hartid) ((volatile uint32_t *)MSIP_BASE)[hartid]

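/*
 * An MSIP write carries no information by itself, so the reason for
 * the IPI is recorded in a per-CPU atomic bitmask that the receiving
 * CPU reads and clears in its handler.
 */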
static atomic_val_t cpu_pending_ipi[CONFIG_MP_MAX_NUM_CPUS];
#define IPI_SCHED     0
#define IPI_FPU_FLUSH 1
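
/*
 * Send a scheduling IPI to every online CPU whose bit is set in
 * cpu_bitmap, except the calling CPU: mark IPI_SCHED pending for each
 * target, then write its MSIP register to raise the interrupt.
 */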
22 
void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
	unsigned int key = arch_irq_lock();
	unsigned int id = _current_cpu->id;
	unsigned int num_cpus = arch_num_cpus();

	for (unsigned int i = 0; i < num_cpus; i++) {
		if ((i != id) && _kernel.cpus[i].arch.online && ((cpu_bitmap & BIT(i)) != 0)) {
			atomic_set_bit(&cpu_pending_ipi[i], IPI_SCHED);
			MSIP(_kernel.cpus[i].arch.hartid) = 1;
		}
	}

	arch_irq_unlock(key);
}

#ifdef CONFIG_FPU_SHARING
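/*
 * Request that the given CPU flush its FPU contents back to memory:
 * mark IPI_FPU_FLUSH pending for it and raise its machine software
 * interrupt.
 */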
void arch_flush_fpu_ipi(unsigned int cpu)
{
	atomic_set_bit(&cpu_pending_ipi[cpu], IPI_FPU_FLUSH);
	MSIP(_kernel.cpus[cpu].arch.hartid) = 1;
}
#endif /* CONFIG_FPU_SHARING */

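/*
 * Machine software interrupt handler: acknowledge the IPI by clearing
 * this hart's MSIP register, then atomically fetch and clear the
 * pending bits and act on each one.
 */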
static void sched_ipi_handler(const void *unused)
{
	ARG_UNUSED(unused);

	MSIP(csr_read(mhartid)) = 0;

	atomic_val_t pending_ipi = atomic_clear(&cpu_pending_ipi[_current_cpu->id]);

	if (pending_ipi & ATOMIC_MASK(IPI_SCHED)) {
		z_sched_ipi();
	}
#ifdef CONFIG_FPU_SHARING
	if (pending_ipi & ATOMIC_MASK(IPI_FPU_FLUSH)) {
		/* disable IRQs */
		csr_clear(mstatus, MSTATUS_IEN);
		/* perform the flush */
		arch_flush_local_fpu();
		/*
		 * No need to re-enable IRQs here as long as
		 * this remains the last case.
		 */
	}
#endif /* CONFIG_FPU_SHARING */
}

#ifdef CONFIG_FPU_SHARING
/*
 * Make sure there is no pending FPU flush request for this CPU while
 * waiting for a contended spinlock to become available. This prevents
 * a deadlock when the lock we need is already taken by another CPU
 * that also wants its FPU content to be reinstated while such content
 * is still live in this CPU's FPU.
 */
void arch_spin_relax(void)
{
	atomic_val_t *pending_ipi = &cpu_pending_ipi[_current_cpu->id];

	if (atomic_test_and_clear_bit(pending_ipi, IPI_FPU_FLUSH)) {
		/*
		 * We may not be in IRQ context here, hence we cannot use
		 * arch_flush_local_fpu() directly.
		 */
		arch_float_disable(_current_cpu->arch.fpu_owner);
	}
}
#endif /* CONFIG_FPU_SHARING */

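/*
 * Connect and enable the machine software interrupt used to deliver
 * IPIs.
 */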
int arch_smp_init(void)
{
	IRQ_CONNECT(RISCV_IRQ_MSOFT, 0, sched_ipi_handler, NULL, 0);
	irq_enable(RISCV_IRQ_MSOFT);

	return 0;
}