# Copyright (c) 2024 Intel Corp.
# SPDX-License-Identifier: Apache-2.0
#
menu "SMP Options"

config SMP
	bool "Symmetric multiprocessing support"
	depends on USE_SWITCH
	depends on !ATOMIC_OPERATIONS_C
	help
	  When true, the kernel will be built with SMP support, allowing
	  more than one CPU to schedule Zephyr tasks at a time.

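# Note: an application normally enables SMP from its own configuration
# rather than by editing this file. A minimal prj.conf sketch (the CPU
# count below is only an example value):
#
#   CONFIG_SMP=y
#   CONFIG_MP_MAX_NUM_CPUS=2
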
config USE_SWITCH
	bool "Use new-style _arch_switch instead of arch_swap"
	depends on USE_SWITCH_SUPPORTED
	help
	  The _arch_switch() API is a lower level context switching
	  primitive than the original arch_swap mechanism.  It is required
	  for an SMP-aware scheduler, or if the architecture does not
	  provide arch_swap.  In uniprocessor situations where the
	  architecture provides both, _arch_switch incurs somewhat more
	  overhead and may be slower.

config USE_SWITCH_SUPPORTED
	bool
	help
	  Indicates whether the _arch_switch() API is supported by the
	  currently enabled platform. This option should be selected by
	  platforms that implement it.

config SMP_BOOT_DELAY
	bool "Delay booting secondary cores"
	depends on SMP
	help
	  By default Zephyr will boot all available CPUs during startup.
	  Select this option to skip this and allow custom code
	  (architecture/SoC/board/application) to boot secondary CPUs at
	  a later time.

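# A hedged sketch of how an application might bring a delayed secondary
# CPU online later, assuming the k_smp_cpu_start() helper is available on
# the target Zephyr version (the exact signature may differ):
#
#   /* prj.conf: CONFIG_SMP=y and CONFIG_SMP_BOOT_DELAY=y */
#   #include <zephyr/kernel.h>
#
#   void bring_up_second_cpu(void)
#   {
#           /* Start CPU 1 once the application decides it is needed;
#            * no extra per-CPU init hook is passed in this sketch.
#            */
#           k_smp_cpu_start(1, NULL, NULL);
#   }
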
config MP_NUM_CPUS
	int "Number of CPUs/cores [DEPRECATED]"
	default MP_MAX_NUM_CPUS
	range 1 12
	help
	  This option is deprecated; please use MP_MAX_NUM_CPUS instead.

config MP_MAX_NUM_CPUS
	int "Maximum number of CPUs/cores"
	default 1
	range 1 12
	help
	  Maximum number of multiprocessing-capable cores available to the
	  multicpu API and SMP features.

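# Illustrative C sketch of the distinction between this compile-time
# maximum and the CPU count seen at run time; arch_num_cpus() is assumed
# to be available here and may vary across Zephyr versions:
#
#   #include <zephyr/kernel.h>
#
#   void report_cpus(void)
#   {
#           /* Upper bound fixed at build time by this Kconfig symbol */
#           printk("max CPUs: %d\n", CONFIG_MP_MAX_NUM_CPUS);
#           /* CPUs the kernel actually knows about at run time */
#           printk("num CPUs: %u\n", arch_num_cpus());
#   }
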
config SCHED_IPI_SUPPORTED
	bool
	help
	  True if the architecture supports a call to arch_sched_broadcast_ipi()
	  to broadcast an interrupt that will call z_sched_ipi() on other CPUs
	  in the system.  Required for k_thread_abort() to operate with
	  reasonable latency (otherwise we might have to wait for the other
	  thread to take an interrupt, which can be arbitrarily far in the
	  future).

config SCHED_IPI_CASCADE
	bool "Use cascading IPIs to correct localized scheduling"
	depends on SCHED_CPU_MASK && !SCHED_CPU_MASK_PIN_ONLY
	default n
	help
	  Threads that are preempted by a local thread (a thread that is
	  restricted by its CPU mask to execute on a subset of all CPUs) may
	  trigger additional IPIs when the preempted thread is of higher
	  priority than a currently executing thread on another CPU. Although
	  these cascading IPIs will ensure that the system settles upon a
	  valid set of high priority threads, they come at a performance cost.

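# Illustrative sketch of the kind of "local" (CPU-mask-restricted) thread
# described above, using the CONFIG_SCHED_CPU_MASK API; the thread, stack
# and priority names below are placeholders:
#
#   /* Create the thread suspended so its CPU mask may still be changed */
#   k_tid_t tid = k_thread_create(&worker, worker_stack,
#                                 K_THREAD_STACK_SIZEOF(worker_stack),
#                                 worker_fn, NULL, NULL, NULL,
#                                 5, 0, K_FOREVER);
#
#   k_thread_cpu_mask_clear(tid);     /* start with no CPUs allowed... */
#   k_thread_cpu_mask_enable(tid, 0); /* ...then permit only CPU 0 */
#   k_thread_start(tid);
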
config TRACE_SCHED_IPI
	bool "Test IPI"
	help
	  When true, a hook is added into z_sched_ipi() in order to check
	  whether the scheduler IPI has been called or not, for testing
	  purposes.
	depends on SCHED_IPI_SUPPORTED
	depends on MP_MAX_NUM_CPUS>1

config IPI_OPTIMIZE
	bool "Optimize IPI delivery"
	default n
	depends on SCHED_IPI_SUPPORTED && MP_MAX_NUM_CPUS>1
	help
	  When selected, the kernel will attempt to determine the minimum
	  set of CPUs that need an IPI to trigger a reschedule in response to
	  a thread newly made ready for execution. This increases the
	  computation required at every scheduler operation by a value that is
	  O(N) in the number of CPUs, and in exchange reduces the number of
	  interrupts delivered. Which to choose is going to depend on
	  application behavior. If the architecture also supports directing
	  IPIs to specific CPUs then this has the potential to significantly
	  reduce the number of IPIs (and consequently ISRs) processed by the
	  system as the number of CPUs increases. If not, the only benefit
	  would be to not issue any IPIs if the newly readied thread is of
	  lower priority than all the threads currently executing on other CPUs.

config KERNEL_COHERENCE
	bool "Place all shared data into coherent memory"
	depends on ARCH_HAS_COHERENCE
	default y if SMP && MP_MAX_NUM_CPUS > 1
	select THREAD_STACK_INFO
	help
	  When available and selected, the kernel will build in a mode
	  where all shared data is placed in multiprocessor-coherent
	  (generally "uncached") memory.  Thread stacks will remain
	  cached, as will application memory declared with
	  __incoherent.  This is intended for Zephyr SMP kernels
	  running on cache-incoherent architectures only.  Note that
	  when this is selected, there is an implicit API change that
	  assumes cache coherence for any memory passed to the kernel.
	  Code that creates kernel data structures in cached regions
	  may fail strangely.  Some assertions exist to catch these
	  mistakes, but not all circumstances can be tested.

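# Illustrative sketch of the __incoherent qualifier mentioned above: data
# marked this way stays in cached (non-coherent) memory, so it is fine as
# CPU-private scratch space but must not hold kernel objects shared across
# CPUs (the buffer name and size below are placeholders):
#
#   /* Cached scratch buffer; embedding e.g. a struct k_sem here would
#    * violate the coherence assumption described in this help text.
#    */
#   static __incoherent uint8_t scratch[256];
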
config TICKET_SPINLOCKS
	bool "Ticket spinlocks for lock acquisition fairness [EXPERIMENTAL]"
	select EXPERIMENTAL
	help
	  The basic spinlock implementation is based on a single atomic
	  variable and doesn't guarantee locking fairness across multiple
	  CPUs. It's even possible that a single CPU will win the contention
	  every time, which will result in live-lock.
	  Ticket spinlocks provide a FIFO order of lock acquisition, which
	  resolves this unfairness at the cost of a slightly increased
	  memory footprint.

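# Enabling this option does not change the spinlock API; existing callers
# pick up the ticket-based behavior transparently. Standard usage sketch
# (the lock name is a placeholder):
#
#   static struct k_spinlock lock;
#
#   void critical(void)
#   {
#           k_spinlock_key_t key = k_spin_lock(&lock);
#           /* ...critical section... */
#           k_spin_unlock(&lock, key);
#   }
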
endmenu