// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2019-2021 Intel Corporation. All rights reserved.
//
// Author: Tomasz Lauda

#include <sof/audio/component.h>
#include <sof/drivers/timer.h>
#include <sof/lib/alloc.h>
#include <sof/lib/cpu.h>
#include <sof/platform.h>
#include <sof/schedule/ll_schedule.h>
#include <sof/schedule/ll_schedule_domain.h>
#include <sof/schedule/schedule.h>
#include <sof/schedule/task.h>
#include <sof/trace/trace.h>
#include <ipc/topology.h>
#include <user/trace.h>

#include <kernel.h>
#include <sys_clock.h>

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Currently the Zephyr clock rate is part of its Kconfig and known at build
 * time. SOF on Intel CAVS platforms currently only aligns with Zephyr when
 * both use the CAVS 19.2 MHz SSP clock. TODO - needs runtime alignment.
 */
#if CONFIG_XTENSA && CONFIG_CAVS && !CONFIG_CAVS_TIMER
#error "Zephyr uses 19.2MHz clock derived from SSP which must be enabled."
#endif

#define ZEPHYR_LL_STACK_SIZE 8192

/* LL scheduler period expressed in Zephyr system clock ticks */
#define LL_TIMER_PERIOD_TICKS \
	(CONFIG_SYS_CLOCK_TICKS_PER_SEC * LL_TIMER_PERIOD_US / 1000000ULL)

K_KERNEL_STACK_ARRAY_DEFINE(ll_sched_stack, CONFIG_CORE_COUNT, ZEPHYR_LL_STACK_SIZE);

/* Per-core LL scheduler thread context */
struct zephyr_domain_thread {
	struct k_thread ll_thread;
	/* signalled from the timer callback to run one scheduling period */
	struct k_sem sem;
	/* LL scheduler handler registered for this core and its argument */
	void (*handler)(void *arg);
	void *arg;
};

struct zephyr_domain {
	/* one periodic timer shared by all cores */
	struct k_timer timer;
	struct timer *ll_timer;
	struct zephyr_domain_thread domain_thread[CONFIG_CORE_COUNT];
	struct ll_schedule_domain *ll_domain;
};

/* perf measurement window size: report every 1 << CYCLES_WINDOW_SIZE periods */
#define CYCLES_WINDOW_SIZE	10

static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3)
{
	struct zephyr_domain *zephyr_domain = p1;
	int core = cpu_get_id();
	struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core;
	unsigned int runs = 0, overruns = 0, cycles_sum = 0, cycles_max = 0;
	unsigned int cycles0, cycles1, diff, timer_fired;

	for (;;) {
		/* immediately go to sleep, waiting to be woken up by the timer */
		k_sem_take(&dt->sem, K_FOREVER);

		cycles0 = k_cycle_get_32();
		dt->handler(dt->arg);
		cycles1 = k_cycle_get_32();

		/* handle the 32-bit cycle counter wrapping around */
		if (cycles1 > cycles0)
			diff = cycles1 - cycles0;
		else
			diff = UINT32_MAX - cycles0 + cycles1;

		/* more than one expiry since the last wake-up means an overrun */
		timer_fired = k_timer_status_get(&zephyr_domain->timer);
		if (timer_fired > 1)
			overruns++;

		cycles_sum += diff;
		cycles_max = diff > cycles_max ? diff : cycles_max;

		if (++runs == 1 << CYCLES_WINDOW_SIZE) {
			cycles_sum >>= CYCLES_WINDOW_SIZE;
			tr_info(&ll_tr, "ll timer avg %u, max %u, overruns %u",
				cycles_sum, cycles_max, overruns);
			cycles_sum = 0;
			cycles_max = 0;
			runs = 0;
			overruns = 0;
		}
	}
}
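/*
 * Illustrative note on the window arithmetic above, assuming the default
 * 1 ms LL_TIMER_PERIOD_US (an assumption: the period is defined elsewhere):
 * with CYCLES_WINDOW_SIZE == 10 the thread reports once every
 * 1 << 10 = 1024 periods, i.e. roughly once per second per core, and
 * "cycles_sum >>= CYCLES_WINDOW_SIZE" divides the accumulated count by 1024,
 * so "avg" in the trace line is the mean handler runtime per period in
 * timer cycles.
 */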
/* Timer callback: runs in timer IRQ context */
static void zephyr_domain_timer_fn(struct k_timer *timer)
{
	struct zephyr_domain *zephyr_domain = k_timer_user_data_get(timer);
	uint64_t now = k_uptime_ticks();
	int core;

	if (!zephyr_domain)
		return;

	/*
	 * This loop should only run once, but for the (nearly) impossible
	 * case of a missed interrupt, add as many periods as needed. In fact
	 * we don't need struct ll_schedule_domain::next_tick and
	 * struct task::start for a strictly periodic Zephyr-based LL scheduler
	 * implementation, they will be removed after a short grace period.
	 */
	while (zephyr_domain->ll_domain->next_tick <= now)
		zephyr_domain->ll_domain->next_tick += LL_TIMER_PERIOD_TICKS;

	/* wake up the LL scheduler thread on every core with registered work */
	for (core = 0; core < CONFIG_CORE_COUNT; core++) {
		struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core;

		if (dt->handler)
			k_sem_give(&dt->sem);
	}
}

static int zephyr_domain_register(struct ll_schedule_domain *domain,
				  struct task *task,
				  void (*handler)(void *arg), void *arg)
{
	struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain);
	int core = cpu_get_id();
	struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core;
	char thread_name[] = "ll_thread0";
	k_tid_t thread;

	tr_dbg(&ll_tr, "zephyr_domain_register()");

	/* domain work only needs to be registered once on each core */
	if (dt->handler)
		return 0;

	dt->handler = handler;
	dt->arg = arg;

	/* 10 is rather arbitrary, we'd better not accumulate 10 missed timer interrupts */
	k_sem_init(&dt->sem, 0, 10);

	/* give each core a unique thread name: "ll_thread0", "ll_thread1", ... */
	thread_name[sizeof(thread_name) - 2] = '0' + core;

	/* create the LL thread at the highest cooperative priority, don't start it yet */
	thread = k_thread_create(&dt->ll_thread,
				 ll_sched_stack[core],
				 ZEPHYR_LL_STACK_SIZE,
				 zephyr_domain_thread_fn, zephyr_domain, NULL, NULL,
				 -CONFIG_NUM_COOP_PRIORITIES, 0, K_FOREVER);

	/* pin the LL thread to this core before starting it */
	k_thread_cpu_mask_clear(thread);
	k_thread_cpu_mask_enable(thread, core);
	k_thread_name_set(thread, thread_name);

	k_thread_start(thread);

	/* the timer is shared by all cores: initialize and start it only once */
	if (!k_timer_user_data_get(&zephyr_domain->timer)) {
		k_timeout_t start = {0};

		k_timer_init(&zephyr_domain->timer, zephyr_domain_timer_fn, NULL);
		k_timer_user_data_set(&zephyr_domain->timer, zephyr_domain);
		k_timer_start(&zephyr_domain->timer, start, K_USEC(LL_TIMER_PERIOD_US));

		domain->next_tick = k_uptime_ticks() +
			k_timer_remaining_ticks(&zephyr_domain->timer);
	}

	tr_info(&ll_tr, "zephyr_domain_register domain->type %d domain->clk %d domain->ticks_per_ms %d period %d",
		domain->type, domain->clk, domain->ticks_per_ms,
		(uint32_t)LL_TIMER_PERIOD_US);

	return 0;
}

static int zephyr_domain_unregister(struct ll_schedule_domain *domain,
				    struct task *task, uint32_t num_tasks)
{
	struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain);
	int core = cpu_get_id();

	tr_dbg(&ll_tr, "zephyr_domain_unregister()");

	/* tasks still registered on this core */
	if (num_tasks)
		return 0;

	/* no tasks left on any core: stop the shared timer */
	if (!atomic_read(&domain->total_num_tasks)) {
		k_timer_stop(&zephyr_domain->timer);
		k_timer_user_data_set(&zephyr_domain->timer, NULL);
	}

	zephyr_domain->domain_thread[core].handler = NULL;

	tr_info(&ll_tr, "zephyr_domain_unregister domain->type %d domain->clk %d",
		domain->type, domain->clk);

	/*
	 * If running in the context of the domain thread, k_thread_abort()
	 * will not return
	 */
	k_thread_abort(&zephyr_domain->domain_thread[core].ll_thread);

	return 0;
}

static bool zephyr_domain_is_pending(struct ll_schedule_domain *domain,
				     struct task *task, struct comp_dev **comp)
{
	return task->start <= k_uptime_ticks();
}

static const struct ll_schedule_domain_ops zephyr_domain_ops = {
	.domain_register	= zephyr_domain_register,
	.domain_unregister	= zephyr_domain_unregister,
	.domain_is_pending	= zephyr_domain_is_pending
};

struct ll_schedule_domain *zephyr_domain_init(struct timer *timer, int clk)
{
	struct ll_schedule_domain *domain;
	struct zephyr_domain *zephyr_domain;

	domain = domain_init(SOF_SCHEDULE_LL_TIMER, clk, false, &zephyr_domain_ops);

	zephyr_domain = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM,
				sizeof(*zephyr_domain));

	zephyr_domain->ll_timer = timer;
	zephyr_domain->ll_domain = domain;

	ll_sch_domain_set_pdata(domain, zephyr_domain);

	return domain;
}
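/*
 * Usage sketch, not part of this file: platform initialization is expected
 * to create the domain once and hand it to the LL scheduler. The call site,
 * timer and clock arguments below are platform-specific assumptions used
 * only for illustration:
 *
 *	struct ll_schedule_domain *domain =
 *		zephyr_domain_init(platform_timer, CLK_CPU(cpu_get_id()));
 *
 *	scheduler_init_ll(domain);
 *
 * From then on, scheduling the first LL task on a core reaches
 * zephyr_domain_register(), which creates that core's pinned thread and, on
 * the very first registration, starts the shared periodic k_timer.
 */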