// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2019-2021 Intel Corporation. All rights reserved.
//
// Author: Tomasz Lauda <tomasz.lauda@linux.intel.com>

#include <sof/drivers/timer.h>
#include <sof/lib/alloc.h>
#include <sof/lib/cpu.h>
#include <sof/lib/memory.h>
#include <sof/math/numbers.h>
#include <sof/platform.h>
#include <sof/schedule/ll_schedule.h>
#include <sof/schedule/ll_schedule_domain.h>
#include <sof/schedule/schedule.h>
#include <sof/schedule/task.h>
#include <ipc/topology.h>
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include <kernel.h>
#include <sys_clock.h>
/*
 * Currently the Zephyr clock rate is part of its Kconfig and known at build
 * time. SOF on Intel CAVS platforms currently only aligns with Zephyr when
 * both use the CAVS 19.2 MHz SSP clock. TODO - needs runtime alignment.
 */
#if CONFIG_XTENSA && CONFIG_CAVS && !CONFIG_CAVS_TIMER
#error "Zephyr uses 19.2MHz clock derived from SSP which must be enabled."
#endif

#define ZEPHYR_LL_STACK_SIZE 8192

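/*
 * Scheduler period in Zephyr kernel ticks. As a worked example (values are
 * illustrative): with LL_TIMER_PERIOD_US = 1000 (a 1 ms LL tick) and
 * CONFIG_SYS_CLOCK_TICKS_PER_SEC = 19200000 (the 19.2 MHz CAVS clock), this
 * evaluates to 19200000 * 1000 / 1000000 = 19200 kernel ticks per period.
 */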
#define LL_TIMER_PERIOD_TICKS (CONFIG_SYS_CLOCK_TICKS_PER_SEC * LL_TIMER_PERIOD_US / 1000000ULL)

K_KERNEL_STACK_ARRAY_DEFINE(ll_sched_stack, CONFIG_CORE_COUNT, ZEPHYR_LL_STACK_SIZE);

struct zephyr_domain_thread {
	struct k_thread ll_thread;	/* per-core LL scheduler thread */
	struct k_sem sem;		/* wake-up semaphore, given from the timer ISR */
	void (*handler)(void *arg);	/* LL scheduler tick handler */
	void *arg;			/* argument passed to the handler */
};

struct zephyr_domain {
	struct k_timer timer;		/* one periodic timer shared by all cores */
	struct timer *ll_timer;
	struct zephyr_domain_thread domain_thread[CONFIG_CORE_COUNT];
	struct ll_schedule_domain *ll_domain;
};

/* perf measurement window size 2^x */
#define CYCLES_WINDOW_SIZE 10

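/*
 * Per-core scheduler thread: sleeps on the domain semaphore, runs the
 * registered LL handler once per timer period and collects execution-time
 * statistics (average and maximum cycles, plus overruns) over a window of
 * 1 << CYCLES_WINDOW_SIZE iterations.
 */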
static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3)
{
	struct zephyr_domain *zephyr_domain = p1;
	int core = cpu_get_id();
	struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core;
	unsigned int runs = 0, overruns = 0, cycles_sum = 0, cycles_max = 0;
	unsigned int cycles0, cycles1, diff, timer_fired;

	for (;;) {
		/* immediately go to sleep, waiting to be woken up by the timer */
		k_sem_take(&dt->sem, K_FOREVER);

		cycles0 = k_cycle_get_32();
		dt->handler(dt->arg);
		cycles1 = k_cycle_get_32();

		if (cycles1 > cycles0)
			diff = cycles1 - cycles0;
		else
			/* the 32-bit cycle counter wrapped around */
			diff = UINT32_MAX - cycles0 + cycles1 + 1;

		/*
		 * k_timer_status_get() returns the number of timer expiries
		 * since it was last read: anything above 1 means this thread
		 * failed to wake up for at least one period.
		 */
		timer_fired = k_timer_status_get(&zephyr_domain->timer);
		if (timer_fired > 1)
			overruns++;

		cycles_sum += diff;
		cycles_max = diff > cycles_max ? diff : cycles_max;

		if (++runs == 1 << CYCLES_WINDOW_SIZE) {
			cycles_sum >>= CYCLES_WINDOW_SIZE;
			tr_info(&ll_tr, "ll timer avg %u, max %u, overruns %u",
				cycles_sum, cycles_max, overruns);
			cycles_sum = 0;
			cycles_max = 0;
			runs = 0;
			overruns = 0;
		}
	}
}

/*
 * Timer callback: runs in timer IRQ context. Advances the domain's next_tick
 * and wakes the scheduler thread on every core with a registered handler.
 */
static void zephyr_domain_timer_fn(struct k_timer *timer)
{
	struct zephyr_domain *zephyr_domain = k_timer_user_data_get(timer);
	uint64_t now = k_uptime_ticks();
	int core;

	if (!zephyr_domain)
		return;

	/*
	 * This loop should only run once, but for the (nearly) impossible
	 * case of a missed interrupt, add as many periods as needed. In fact
	 * we don't need struct ll_schedule_domain::next_tick and
	 * struct task::start for a strictly periodic Zephyr-based LL scheduler
	 * implementation; they will be removed after a short grace period.
	 */
	while (zephyr_domain->ll_domain->next_tick <= now)
		zephyr_domain->ll_domain->next_tick += LL_TIMER_PERIOD_TICKS;

	for (core = 0; core < CONFIG_CORE_COUNT; core++) {
		struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core;

		if (dt->handler)
			k_sem_give(&dt->sem);
	}
}

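/*
 * Registers LL domain work on the current core: on first registration per
 * core this creates a cooperative scheduler thread pinned to that core, and
 * on the very first registration overall it also starts the shared periodic
 * timer that drives all per-core threads.
 */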
static int zephyr_domain_register(struct ll_schedule_domain *domain,
				  struct task *task,
				  void (*handler)(void *arg), void *arg)
{
	struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain);
	int core = cpu_get_id();
	struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core;
	char thread_name[] = "ll_thread0";
	k_tid_t thread;

	tr_dbg(&ll_tr, "zephyr_domain_register()");

	/* domain work only needs to be registered once on each core */
	if (dt->handler)
		return 0;

	dt->handler = handler;
	dt->arg = arg;

	/* 10 is rather arbitrary, we'd better not accumulate 10 missed timer interrupts */
	k_sem_init(&dt->sem, 0, 10);

	/* encode the core ID in the last character of the thread name */
	thread_name[sizeof(thread_name) - 2] = '0' + core;

	thread = k_thread_create(&dt->ll_thread,
				 ll_sched_stack[core],
				 ZEPHYR_LL_STACK_SIZE,
				 zephyr_domain_thread_fn, zephyr_domain, NULL, NULL,
				 -CONFIG_NUM_COOP_PRIORITIES, 0, K_FOREVER);

	/* pin the scheduler thread to this core before starting it */
	k_thread_cpu_mask_clear(thread);
	k_thread_cpu_mask_enable(thread, core);
	k_thread_name_set(thread, thread_name);

	k_thread_start(thread);

	/* the timer's user data doubles as a "timer already started" flag */
	if (!k_timer_user_data_get(&zephyr_domain->timer)) {
		k_timeout_t start = {0};

		k_timer_init(&zephyr_domain->timer, zephyr_domain_timer_fn, NULL);
		k_timer_user_data_set(&zephyr_domain->timer, zephyr_domain);

		k_timer_start(&zephyr_domain->timer, start, K_USEC(LL_TIMER_PERIOD_US));
		domain->next_tick = k_uptime_ticks() +
			k_timer_remaining_ticks(&zephyr_domain->timer);
	}

	tr_info(&ll_tr, "zephyr_domain_register domain->type %d domain->clk %d domain->ticks_per_ms %d period %d",
		domain->type, domain->clk, domain->ticks_per_ms, (uint32_t)LL_TIMER_PERIOD_US);

	return 0;
}

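/*
 * Unregisters LL domain work from the current core: stops the shared timer
 * once no tasks remain on any core, then aborts this core's scheduler thread.
 */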
static int zephyr_domain_unregister(struct ll_schedule_domain *domain,
				    struct task *task, uint32_t num_tasks)
{
	struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain);
	int core = cpu_get_id();

	tr_dbg(&ll_tr, "zephyr_domain_unregister()");

	/* tasks still registered on this core */
	if (num_tasks)
		return 0;

	if (!atomic_read(&domain->total_num_tasks)) {
		k_timer_stop(&zephyr_domain->timer);
		k_timer_user_data_set(&zephyr_domain->timer, NULL);
	}

	zephyr_domain->domain_thread[core].handler = NULL;

	tr_info(&ll_tr, "zephyr_domain_unregister domain->type %d domain->clk %d",
		domain->type, domain->clk);

	/*
	 * If running in the context of the domain thread, k_thread_abort()
	 * will not return
	 */
	k_thread_abort(&zephyr_domain->domain_thread[core].ll_thread);

	return 0;
}

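/* A task is pending once its start time has been reached */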
static bool zephyr_domain_is_pending(struct ll_schedule_domain *domain,
				     struct task *task, struct comp_dev **comp)
{
	return task->start <= k_uptime_ticks();
}

static const struct ll_schedule_domain_ops zephyr_domain_ops = {
	.domain_register = zephyr_domain_register,
	.domain_unregister = zephyr_domain_unregister,
	.domain_is_pending = zephyr_domain_is_pending
};

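/*
 * Creates the Zephyr LL scheduler domain: allocates the shared domain state,
 * ties it to the generic LL domain object and returns the latter.
 */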
struct ll_schedule_domain *zephyr_domain_init(struct timer *timer, int clk)
{
	struct ll_schedule_domain *domain;
	struct zephyr_domain *zephyr_domain;

	domain = domain_init(SOF_SCHEDULE_LL_TIMER, clk, false,
			     &zephyr_domain_ops);

	zephyr_domain = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM,
				sizeof(*zephyr_domain));

	zephyr_domain->ll_timer = timer;
	zephyr_domain->ll_domain = domain;

	ll_sch_domain_set_pdata(domain, zephyr_domain);

	return domain;
}
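
/*
 * Usage sketch (hypothetical; the actual call site lives in per-platform
 * init code and the timer/clock arguments differ per target):
 *
 *	struct ll_schedule_domain *domain =
 *		zephyr_domain_init(platform_timer, PLATFORM_DEFAULT_CLOCK);
 *
 *	scheduler_init_ll(domain);
 */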