// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright(c) 2020 Intel Corporation. All rights reserved.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 */

#include <sof/init.h>
#include <sof/lib/alloc.h>
#include <sof/drivers/idc.h>
#include <sof/drivers/interrupt.h>
#include <sof/drivers/interrupt-map.h>
#include <sof/lib/dma.h>
#include <sof/schedule/schedule.h>
#include <platform/drivers/interrupt.h>
#include <platform/lib/memory.h>
#include <sof/platform.h>
#include <sof/lib/notifier.h>
#include <sof/lib/pm_runtime.h>
#include <sof/audio/pipeline.h>
#include <sof/audio/component_ext.h>
#include <sof/trace/trace.h>

/* Zephyr includes */
#include <device.h>
#include <soc.h>
#include <kernel.h>

#ifndef CONFIG_KERNEL_COHERENCE
#include <arch/xtensa/cache.h>
#endif

extern K_KERNEL_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
				   CONFIG_ISR_STACK_SIZE);

/* 300aaad4-45d2-8313-25d0-5e1d6086cdd1 */
DECLARE_SOF_RT_UUID("zephyr", zephyr_uuid, 0x300aaad4, 0x45d2, 0x8313,
		    0x25, 0xd0, 0x5e, 0x1d, 0x60, 0x86, 0xcd, 0xd1);

DECLARE_TR_CTX(zephyr_tr, SOF_UUID(zephyr_uuid), LOG_LEVEL_INFO);
/*
 * Memory - Create Zephyr HEAP for SOF.
 *
 * Currently functional but some items still WIP.
 */

#ifndef HEAP_RUNTIME_SIZE
#define HEAP_RUNTIME_SIZE 0
#endif

/* system size not declared on some platforms */
#ifndef HEAP_SYSTEM_SIZE
#define HEAP_SYSTEM_SIZE 0
#endif

/* The Zephyr heap */

/* use cached heap for non-shared allocations */
/*#define ENABLE_CACHED_HEAP 1*/

#ifdef CONFIG_IMX
#define HEAPMEM_SIZE (HEAP_SYSTEM_SIZE + HEAP_RUNTIME_SIZE + HEAP_BUFFER_SIZE)

#undef ENABLE_CACHED_HEAP

/*
 * Include the heapmem variable in the .heap_mem section, otherwise HEAPMEM_SIZE
 * is duplicated in two sections and the sdram0 region overflows.
 */
__section(".heap_mem") static uint8_t __aligned(64) heapmem[HEAPMEM_SIZE];

#else

#define HEAPMEM_SHARED_SIZE (HEAP_SYSTEM_SIZE + HEAP_RUNTIME_SIZE + \
			     HEAP_RUNTIME_SHARED_SIZE + HEAP_SYSTEM_SHARED_SIZE)
#ifdef ENABLE_CACHED_HEAP
#define HEAPMEM_SIZE HEAP_BUFFER_SIZE
#else
#define HEAPMEM_SIZE (HEAP_BUFFER_SIZE + HEAPMEM_SHARED_SIZE)
#endif

static uint8_t __aligned(PLATFORM_DCACHE_ALIGN) heapmem[HEAPMEM_SIZE];
#ifdef ENABLE_CACHED_HEAP
static uint8_t __aligned(PLATFORM_DCACHE_ALIGN) heapmem_shared[HEAPMEM_SHARED_SIZE];
static struct k_heap sof_heap_shared;
#endif

#endif

static struct k_heap sof_heap;

static int statics_init(const struct device *unused)
{
	ARG_UNUSED(unused);

	sys_heap_init(&sof_heap.heap, heapmem, HEAPMEM_SIZE);
#ifdef ENABLE_CACHED_HEAP
	sys_heap_init(&sof_heap_shared.heap, heapmem_shared, HEAPMEM_SHARED_SIZE);
#endif
	return 0;
}

SYS_INIT(statics_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
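
/*
 * Note: SYS_INIT() registers statics_init() to run in Zephyr's PRE_KERNEL_1
 * phase, so the heap(s) above are initialized before any SOF code can reach
 * the allocators defined below.
 */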
105

static void *heap_alloc_aligned(struct k_heap *h, size_t align, size_t bytes)
{
	void *ret = NULL;

	k_spinlock_key_t key = k_spin_lock(&h->lock);

	ret = sys_heap_aligned_alloc(&h->heap, align, bytes);

	k_spin_unlock(&h->lock, key);

	return ret;
}

static void *heap_alloc_aligned_cached(struct k_heap *h, size_t min_align, size_t bytes)
{
#ifdef ENABLE_CACHED_HEAP
	unsigned int align = MAX(PLATFORM_DCACHE_ALIGN, min_align);
	unsigned int aligned_size = ALIGN_UP(bytes, align);
	void *ptr;

	/*
	 * Zephyr sys_heap stores metadata at the start of each heap
	 * allocation. To ensure that no allocated cached buffer shares a
	 * cacheline with the metadata chunk, align both the allocation
	 * start and the allocation size to the cacheline.
	 */
	ptr = heap_alloc_aligned(h, align, aligned_size);
	if (ptr) {
		ptr = uncache_to_cache(ptr);

		/*
		 * The heap can be used by different cores, so the cache
		 * needs to be invalidated before the next user.
		 */
		z_xtensa_cache_inv(ptr, aligned_size);
	}

	return ptr;
#else
	return heap_alloc_aligned(&sof_heap, min_align, bytes);
#endif
}
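
/*
 * Worked example of the alignment logic above (assuming a 64-byte
 * PLATFORM_DCACHE_ALIGN): a 10-byte cached request with min_align 0 gives
 * align = 64 and aligned_size = 64, so the returned buffer starts on its own
 * cacheline and the sys_heap metadata preceding it is never invalidated or
 * written back together with user data.
 */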

static void heap_free(struct k_heap *h, void *mem)
{
	k_spinlock_key_t key = k_spin_lock(&h->lock);

	sys_heap_free(&h->heap, mem);

	k_spin_unlock(&h->lock, key);
}

static inline bool zone_is_cached(enum mem_zone zone)
{
#ifndef ENABLE_CACHED_HEAP
	return false;
#endif

	if (zone == SOF_MEM_ZONE_BUFFER)
		return true;

	return false;
}

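/*
 * rmalloc(): SOF_MEM_ZONE_BUFFER requests may be served from the cached heap
 * (only when ENABLE_CACHED_HEAP is defined); all other zones fall back to
 * uncached memory with a default 8-byte alignment.
 */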
void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
{
	if (zone_is_cached(zone))
		return heap_alloc_aligned_cached(&sof_heap, 0, bytes);

#ifdef ENABLE_CACHED_HEAP
	return heap_alloc_aligned(&sof_heap_shared, 8, bytes);
#else
	return heap_alloc_aligned(&sof_heap, 8, bytes);
#endif
}

/* Use SOF_MEM_ZONE_BUFFER at the moment */
void *rbrealloc_align(void *ptr, uint32_t flags, uint32_t caps, size_t bytes,
		      size_t old_bytes, uint32_t alignment)
{
	void *new_ptr;

	if (!ptr) {
		/* TODO: Use correct zone */
		return rballoc_align(flags, caps, bytes, alignment);
	}

	/* Original version returns NULL without freeing this memory */
	if (!bytes) {
		/* TODO: Should we call rfree(ptr)? */
		tr_err(&zephyr_tr, "realloc failed for 0 bytes");
		return NULL;
	}

	new_ptr = rballoc_align(flags, caps, bytes, alignment);
	if (!new_ptr) {
		return NULL;
	}

	if (!(flags & SOF_MEM_FLAG_NO_COPY)) {
		memcpy(new_ptr, ptr, MIN(bytes, old_bytes));
	}

	rfree(ptr);

	tr_info(&zephyr_tr, "rbrealloc: new ptr %p", new_ptr);

	return new_ptr;
}

/**
 * Similar to rmalloc(), guarantees that the returned block is zeroed.
 *
 * @note Do not use for buffers (SOF_MEM_ZONE_BUFFER zone).
 *       Use rballoc() or rballoc_align() to allocate memory for buffers.
 */
void *rzalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
{
	void *ptr = rmalloc(zone, flags, caps, bytes);

	/* rmalloc() may fail, so only zero the block if it was allocated */
	if (ptr)
		memset(ptr, 0, bytes);

	return ptr;
}
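
/*
 * Typical rzalloc() use, as an illustrative sketch only (the type and field
 * names below are hypothetical):
 *
 *	struct comp_data *cd = rzalloc(SOF_MEM_ZONE_RUNTIME, 0,
 *				       SOF_MEM_CAPS_RAM, sizeof(*cd));
 *	if (!cd)
 *		return -ENOMEM;
 */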

/**
 * Allocates a memory block from SOF_MEM_ZONE_BUFFER.
 * @param flags Flags, see SOF_MEM_FLAG_...
 * @param caps Capabilities, see SOF_MEM_CAPS_...
 * @param bytes Size in bytes.
 * @param alignment Alignment in bytes.
 * @return Pointer to the allocated memory or NULL on failure.
 */
void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
		    uint32_t alignment)
{
	return heap_alloc_aligned_cached(&sof_heap, alignment, bytes);
}
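
/*
 * Note: callers normally go through the helpers declared in sof/lib/alloc.h;
 * rballoc() without an explicit alignment is expected to end up here via
 * rballoc_align() with the platform's default alignment.
 */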

/*
 * Frees memory allocated by the above alloc calls.
 */
void rfree(void *ptr)
{
	if (!ptr)
		return;

#ifdef ENABLE_CACHED_HEAP
	/* select heap based on address range */
	if (is_uncached(ptr)) {
		heap_free(&sof_heap_shared, ptr);
		return;
	}

	ptr = cache_to_uncache(ptr);
#endif

	heap_free(&sof_heap, ptr);
}

/* debug only - only needed for linking */
void heap_trace_all(int force)
{
}

/*
 * Interrupts.
 *
 * Mostly mapped. Still needs some linkage symbols that can be removed later.
 */

/* needed for linkage only */
const char irq_name_level2[] = "level2";
const char irq_name_level5[] = "level5";

int interrupt_register(uint32_t irq, void (*handler)(void *arg), void *arg)
{
#ifdef CONFIG_DYNAMIC_INTERRUPTS
	return arch_irq_connect_dynamic(irq, 0, (void (*)(const void *))handler,
					arg, 0);
#else
	tr_err(&zephyr_tr, "Cannot register handler for IRQ %u: dynamic IRQs are disabled",
	       irq);
	return -EOPNOTSUPP;
#endif
}

#if !CONFIG_LIBRARY
/* unregister an IRQ handler - matches on IRQ number and data ptr */
void interrupt_unregister(uint32_t irq, const void *arg)
{
	/*
	 * There is no "unregister" (or "disconnect") for
	 * interrupts in Zephyr.
	 */
	z_soc_irq_disable(irq);
}

/* enable an interrupt source - the IRQ needs to be mapped to Zephyr,
 * arg is used to match.
 */
uint32_t interrupt_enable(uint32_t irq, void *arg)
{
	z_soc_irq_enable(irq);

	return 0;
}

/* disable interrupt */
uint32_t interrupt_disable(uint32_t irq, void *arg)
{
	z_soc_irq_disable(irq);

	return 0;
}
#endif

/*
 * i.MX uses the IRQ_STEER
 */
#if !CONFIG_IMX
/*
 * CAVS IRQs are multilevel whereas BYT and BDW are DSP level only.
 */
int interrupt_get_irq(unsigned int irq, const char *cascade)
{
#if CONFIG_SOC_SERIES_INTEL_ADSP_BAYTRAIL || \
	CONFIG_SOC_SERIES_INTEL_ADSP_BROADWELL || \
	CONFIG_LIBRARY
	return irq;
#else
	if (cascade == irq_name_level2)
		return SOC_AGGREGATE_IRQ(irq, IRQ_NUM_EXT_LEVEL2);
	if (cascade == irq_name_level5)
		return SOC_AGGREGATE_IRQ(irq, IRQ_NUM_EXT_LEVEL5);

	return SOC_AGGREGATE_IRQ(0, irq);
#endif
}
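
/*
 * Example of the mapping above on CAVS: a DSP-level source n behind the
 * level-2 cascade is encoded as SOC_AGGREGATE_IRQ(n, IRQ_NUM_EXT_LEVEL2),
 * while an already "flat" IRQ is passed through as SOC_AGGREGATE_IRQ(0, irq).
 */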

void interrupt_mask(uint32_t irq, unsigned int cpu)
{
	/* TODO: how do we mask on other cores with Zephyr APIs */
}

void interrupt_unmask(uint32_t irq, unsigned int cpu)
{
	/* TODO: how do we unmask on other cores with Zephyr APIs */
}

void platform_interrupt_init(void)
{
	/* handled by zephyr - needed for linkage */
}

void platform_interrupt_set(uint32_t irq)
{
	/* handled by zephyr - needed for linkage */
}

void platform_interrupt_clear(uint32_t irq, uint32_t mask)
{
	/* handled by zephyr - needed for linkage */
}
#endif

/*
 * Timers.
 *
 * Mostly mapped. TODO: align with 64bit Zephyr timers when they are upstream.
 */

#if !CONFIG_LIBRARY
uint64_t arch_timer_get_system(struct timer *timer)
{
	return platform_timer_get(timer);
}
#endif

uint64_t platform_timer_get(struct timer *timer)
{
#if CONFIG_SOC_SERIES_INTEL_ADSP_BAYTRAIL
	uint32_t low;
	uint32_t high;
	uint64_t time;

	do {
		/* TODO: check and see whether 32bit IRQ is pending for timer */
		high = timer->hitime;
		/* read low 32 bits */
		low = shim_read(SHIM_EXT_TIMER_STAT);
	} while (high != timer->hitime);

	time = ((uint64_t)high << 32) | low;

	return time;
#elif CONFIG_SOC_SERIES_INTEL_ADSP_BROADWELL || CONFIG_LIBRARY
	// FIXME!
	return 0;
#elif CONFIG_IMX
	/* For i.MX use Xtensa timer, as we do now with SOF */
	uint64_t time = 0;
	uint32_t low;
	uint32_t high;
	uint32_t ccompare;

	if (!timer || timer->id >= ARCH_TIMER_COUNT)
		goto out;

	ccompare = xthal_get_ccompare(timer->id);

	/* read low 32 bits */
	low = xthal_get_ccount();

	/* check and see whether 32bit IRQ is pending for timer */
	if ((arch_interrupt_get_status() & (1 << timer->irq)) && ccompare == 1) {
		/* yes, overflow has occurred but handler has not run */
		high = timer->hitime + 1;
	} else {
		/* no overflow */
		high = timer->hitime;
	}

	time = ((uint64_t)high << 32) | low;

out:

	return time;
#else
	/* CAVS versions */
	return shim_read64(SHIM_DSPWC);
#endif
}

void platform_timer_stop(struct timer *timer)
{
}

uint64_t platform_timer_get_atomic(struct timer *timer)
{
	uint32_t flags;
	uint64_t ticks_now;

	irq_local_disable(flags);
	ticks_now = platform_timer_get(timer);
	irq_local_enable(flags);

	return ticks_now;
}

/*
 * Notifier.
 *
 * Uses SOF inter-component messaging today. Zephyr has similar APIs that will
 * need some minor feature updates prior to merge, e.g. FW-to-host messages.
 * TODO: align with the Zephyr API when ready.
 */

static struct notify *host_notify[CONFIG_CORE_COUNT];

struct notify **arch_notify_get(void)
{
	return host_notify + cpu_get_id();
}

/*
 * Debug
 */
void arch_dump_regs_a(void *dump_buf)
{
	/* needed for linkage only */
}

/*
 * Xtensa. TODO: this needs to be removed and fixed in SOF.
 */
unsigned int _xtos_ints_off(unsigned int mask)
{
	/* turn all local IRQs OFF */
	irq_lock();
	return 0;
}

void ipc_send_queued_msg(void);

static void ipc_send_queued_callback(void *private_data, enum notify_id event_type,
				     void *caller_data)
{
	if (!ipc_get()->pm_prepare_D3)
		ipc_send_queued_msg();
}

/*
 * Audio components.
 *
 * Integrated except for linkage so symbols are "used" here until linker
 * support is ready in Zephyr. TODO: fix component linkage in Zephyr.
 */

/* TODO: this is not yet working with Zephyr - the section has been created but
 * no symbols are being loaded into the ELF file.
 */
extern intptr_t _module_init_start;
extern intptr_t _module_init_end;

static void sys_module_init(void)
{
#if !CONFIG_LIBRARY
	intptr_t *module_init = (intptr_t *)(&_module_init_start);

	for (; module_init < (intptr_t *)&_module_init_end; ++module_init)
		((void (*)(void))(*module_init))();
#endif
}
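
/*
 * Each DECLARE_MODULE() in SOF is expected to emit a constructor pointer into
 * the .module_init section delimited by _module_init_start and
 * _module_init_end, which the loop above walks. With the current Zephyr
 * linker scripts that section stays empty, hence the manual calls below.
 */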

/*
 * TODO: all the audio processing components'/modules' constructors should be
 * linked into the module_init section, but this is not happening. Just call
 * the constructors directly for now.
 */

void sys_comp_volume_init(void);
void sys_comp_host_init(void);
void sys_comp_mixer_init(void);
void sys_comp_dai_init(void);
void sys_comp_src_init(void);
void sys_comp_mux_init(void);
void sys_comp_selector_init(void);
void sys_comp_switch_init(void);
void sys_comp_tone_init(void);
void sys_comp_eq_fir_init(void);
void sys_comp_keyword_init(void);
void sys_comp_asrc_init(void);
void sys_comp_dcblock_init(void);
void sys_comp_eq_iir_init(void);
void sys_comp_kpb_init(void);
void sys_comp_smart_amp_init(void);

/* Zephyr redefines log_message() and mtrace_printf(), which leaves the
 * .static_log_entries ELF sections for the sof-logger completely empty.
 * This makes smex fail. Define at least one such section to fix the build
 * when the sof-logger is not used.
 */
static inline const void *smex_placeholder_f(void)
{
	_DECLARE_LOG_ENTRY(LOG_LEVEL_DEBUG,
			   "placeholder so .static_log.X are not all empty",
			   _TRACE_INV_CLASS, 0);

	return &log_entry;
}

/* We need to actually use the function and export something, otherwise the
 * compiler optimizes everything away.
 */
const void *_smex_placeholder;

int task_main_start(struct sof *sof)
{
	_smex_placeholder = smex_placeholder_f();

	int ret;

	/* init default audio components */
	sys_comp_init(sof);

	/* init self-registered modules */
	sys_module_init();

	/* host is mandatory */
	sys_comp_host_init();

	if (IS_ENABLED(CONFIG_COMP_VOLUME)) {
		sys_comp_volume_init();
	}

	if (IS_ENABLED(CONFIG_COMP_MIXER)) {
		sys_comp_mixer_init();
	}

	if (IS_ENABLED(CONFIG_COMP_DAI)) {
		sys_comp_dai_init();
	}

	if (IS_ENABLED(CONFIG_COMP_SRC)) {
		sys_comp_src_init();
	}

	if (IS_ENABLED(CONFIG_COMP_SEL)) {
		sys_comp_selector_init();
	}

	if (IS_ENABLED(CONFIG_COMP_SWITCH)) {
		sys_comp_switch_init();
	}

	if (IS_ENABLED(CONFIG_COMP_TONE)) {
		sys_comp_tone_init();
	}

	if (IS_ENABLED(CONFIG_COMP_FIR)) {
		sys_comp_eq_fir_init();
	}

	if (IS_ENABLED(CONFIG_COMP_IIR)) {
		sys_comp_eq_iir_init();
	}

	if (IS_ENABLED(CONFIG_SAMPLE_KEYPHRASE)) {
		sys_comp_keyword_init();
	}

	if (IS_ENABLED(CONFIG_COMP_KPB)) {
		sys_comp_kpb_init();
	}

	if (IS_ENABLED(CONFIG_SAMPLE_SMART_AMP)) {
		sys_comp_smart_amp_init();
	}

	if (IS_ENABLED(CONFIG_COMP_ASRC)) {
		sys_comp_asrc_init();
	}

	if (IS_ENABLED(CONFIG_COMP_DCBLOCK)) {
		sys_comp_dcblock_init();
	}

	if (IS_ENABLED(CONFIG_COMP_MUX)) {
		sys_comp_mux_init();
	}

	/* init pipeline position offsets */
	pipeline_posn_init(sof);

#if defined(CONFIG_IMX)
#define SOF_IPC_QUEUED_DOMAIN SOF_SCHEDULE_LL_DMA
#else
#define SOF_IPC_QUEUED_DOMAIN SOF_SCHEDULE_LL_TIMER
#endif

	/* Temporary fix for issue #4356 */
	(void)notifier_register(NULL, scheduler_get_data(SOF_IPC_QUEUED_DOMAIN),
				NOTIFIER_ID_LL_POST_RUN,
				ipc_send_queued_callback, 0);

	/* let host know DSP boot is complete */
	ret = platform_boot_complete(0);

	return ret;
}

/*
 * Timestamps.
 *
 * TODO: move to generic code in SOF; currently this is platform code.
 */

/* get timestamp for host stream DMA position */
void platform_host_timestamp(struct comp_dev *host,
			     struct sof_ipc_stream_posn *posn)
{
	int err;

	/* get host position */
	err = comp_position(host, posn);
	if (err == 0)
		posn->flags |= SOF_TIME_HOST_VALID;
}

/* get timestamp for DAI stream DMA position */
void platform_dai_timestamp(struct comp_dev *dai,
			    struct sof_ipc_stream_posn *posn)
{
	int err;

	/* get DAI position */
	err = comp_position(dai, posn);
	if (err == 0)
		posn->flags |= SOF_TIME_DAI_VALID;

	/* get SSP wallclock - the DAI sets this to the stream start value */
	posn->wallclock = platform_timer_get(NULL) - posn->wallclock;
	posn->wallclock_hz = clock_get_freq(PLATFORM_DEFAULT_CLOCK);
	posn->flags |= SOF_TIME_WALL_VALID;
}

/* get the current wallclock for a component */
void platform_dai_wallclock(struct comp_dev *dai, uint64_t *wallclock)
{
	*wallclock = platform_timer_get(NULL);
}

/*
 * Multicore
 *
 * Mostly empty today, pending the Zephyr CAVS SMP integration.
 */
#if CONFIG_MULTICORE && CONFIG_SMP
static atomic_t start_flag;

static FUNC_NORETURN void secondary_init(void *arg)
{
	struct k_thread dummy_thread;

	z_smp_thread_init(arg, &dummy_thread);
	secondary_core_init(sof_get());

#ifdef CONFIG_THREAD_STACK_INFO
	dummy_thread.stack_info.start = (uintptr_t)z_interrupt_stacks +
		arch_curr_cpu()->id * Z_KERNEL_STACK_LEN(CONFIG_ISR_STACK_SIZE);
	dummy_thread.stack_info.size = Z_KERNEL_STACK_LEN(CONFIG_ISR_STACK_SIZE);
#endif

	z_smp_thread_swap();

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

int arch_cpu_enable_core(int id)
{
	atomic_clear(&start_flag);

	/* Power up the secondary core */
	pm_runtime_get(PM_RUNTIME_DSP, PWRD_BY_TPLG | id);

	arch_start_cpu(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
		       secondary_init, &start_flag);

	atomic_set(&start_flag, 1);

	return 0;
}
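
/*
 * Note on the handshake above: start_flag is cleared before the secondary
 * core is started and set again once arch_start_cpu() has been issued;
 * Zephyr's SMP bring-up (z_smp_thread_init() in secondary_init()) is
 * expected to wait on that flag, so the new core only proceeds after the
 * primary core releases it.
 */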

void arch_cpu_disable_core(int id)
{
	/* TODO: call Zephyr API */
}

int arch_cpu_is_core_enabled(int id)
{
	return arch_cpu_active(id);
}

void cpu_power_down_core(void)
{
	/* TODO: use Zephyr version */
}

int arch_cpu_enabled_cores(void)
{
	unsigned int i;
	int mask = 0;

	for (i = 0; i < CONFIG_MP_NUM_CPUS; i++)
		if (arch_cpu_active(i))
			mask |= BIT(i);

	return mask;
}

static struct idc idc[CONFIG_MP_NUM_CPUS];
static struct idc *p_idc[CONFIG_MP_NUM_CPUS];

struct idc **idc_get(void)
{
	int cpu = cpu_get_id();

	p_idc[cpu] = idc + cpu;

	return p_idc + cpu;
}
#endif