/**
 * Copyright (c) 2024 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <kswap.h>
#include <ksched.h>
#include <ipi.h>

#ifdef CONFIG_TRACE_SCHED_IPI
extern void z_trace_sched_ipi(void);
#endif


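/*
 * Flag the CPUs identified by <ipi_mask> as needing a scheduler IPI by
 * OR-ing the mask into _kernel.pending_ipi. The IPI itself is sent
 * later, by signal_pending_ipi(). On single-CPU targets this is a no-op.
 */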
void flag_ipi(uint32_t ipi_mask)
{
#if defined(CONFIG_SCHED_IPI_SUPPORTED)
	if (arch_num_cpus() > 1) {
		atomic_or(&_kernel.pending_ipi, (atomic_val_t)ipi_mask);
	}
#endif /* CONFIG_SCHED_IPI_SUPPORTED */
}

/* Create a bitmask of CPUs that need an IPI. Note: sched_spinlock is held. */
atomic_val_t ipi_mask_create(struct k_thread *thread)
{
	if (!IS_ENABLED(CONFIG_IPI_OPTIMIZE)) {
		return (CONFIG_MP_MAX_NUM_CPUS > 1) ? IPI_ALL_CPUS_MASK : 0;
	}

	uint32_t  ipi_mask = 0;
	uint32_t  num_cpus = (uint32_t)arch_num_cpus();
	uint32_t  id = _current_cpu->id;
	struct k_thread *cpu_thread;
	bool   executable_on_cpu = true;

	for (uint32_t i = 0; i < num_cpus; i++) {
		if (id == i) {
			continue;
		}

		/*
		 * An IPI absolutely does not need to be sent if ...
		 * 1. the CPU is not active, or
		 * 2. <thread> cannot execute on the target CPU
		 * ... and might not need to be sent if ...
		 * 3. the target CPU's active thread is not preemptible, or
		 * 4. the target CPU's active thread has a higher priority
		 *    (Items 3 & 4 may be overridden by a metaIRQ thread)
		 */

#if defined(CONFIG_SCHED_CPU_MASK)
		executable_on_cpu = ((thread->base.cpu_mask & BIT(i)) != 0);
#endif

		cpu_thread = _kernel.cpus[i].current;
		if ((cpu_thread != NULL) &&
		    (((z_sched_prio_cmp(cpu_thread, thread) < 0) &&
		      (thread_is_preemptible(cpu_thread))) ||
		     thread_is_metairq(thread)) && executable_on_cpu) {
			ipi_mask |= BIT(i);
		}
	}

	return (atomic_val_t)ipi_mask;
}
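
/*
 * Usage sketch (hypothetical caller, shown for illustration; the actual
 * call sites are in the scheduler, not in this file): when <thread>
 * becomes ready, flag the CPUs that should reschedule, then deliver the
 * pending IPIs:
 *
 *	flag_ipi((uint32_t)ipi_mask_create(thread));
 *	...
 *	signal_pending_ipi();
 */
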
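/*
 * Consume the bitmask accumulated in _kernel.pending_ipi and deliver
 * the IPIs: directed at just the flagged CPUs when the architecture
 * supports directed IPIs, otherwise broadcast to all other CPUs.
 */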
void signal_pending_ipi(void)
{
	/* Synchronization note: you might think we need to lock these
	 * two steps, but an IPI is idempotent.  It's OK if we do it
	 * twice.  All we require is that if a CPU sees the flag true,
	 * it is guaranteed to send the IPI, and if a CPU sets
	 * pending_ipi, the IPI will be sent the next time through
	 * this code.
	 */
#if defined(CONFIG_SCHED_IPI_SUPPORTED)
	if (arch_num_cpus() > 1) {
		uint32_t  cpu_bitmap;

		cpu_bitmap = (uint32_t)atomic_clear(&_kernel.pending_ipi);
		if (cpu_bitmap != 0) {
#ifdef CONFIG_ARCH_HAS_DIRECTED_IPIS
			arch_sched_directed_ipi(cpu_bitmap);
#else
			arch_sched_broadcast_ipi();
#endif
		}
	}
#endif /* CONFIG_SCHED_IPI_SUPPORTED */
}

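/*
 * Scheduler IPI hook, expected to run on the CPU that receives a
 * scheduling IPI (typically invoked from the architecture's IPI
 * handler). It performs optional tracing and, when timeslicing is
 * enabled, runs the time slice logic if the current thread is
 * sliceable.
 */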
void z_sched_ipi(void)
{
	/* NOTE: When adding code to this, make sure it is called
	 * at the appropriate locations when !CONFIG_SCHED_IPI_SUPPORTED.
	 */
#ifdef CONFIG_TRACE_SCHED_IPI
	z_trace_sched_ipi();
#endif /* CONFIG_TRACE_SCHED_IPI */

#ifdef CONFIG_TIMESLICING
	if (thread_is_sliceable(arch_current_thread())) {
		z_time_slice();
	}
#endif /* CONFIG_TIMESLICING */
}