1 // SPDX-License-Identifier: BSD-3-Clause
2 //
3 // Copyright(c) 2019 Intel Corporation. All rights reserved.
4 //
5 // Author: Marcin Rajwa <marcin.rajwa@linux.intel.com>
6
7 /*
8 * A key phrase buffer component.
9 */
10
11 /**
12 * \file audio/kpb.c
13 * \brief Key phrase buffer component implementation
14 * \author Marcin Rajwa <marcin.rajwa@linux.intel.com>
15 */
16
17 #include <sof/audio/buffer.h>
18 #include <sof/audio/component_ext.h>
19 #include <sof/audio/pipeline.h>
20 #include <sof/audio/kpb.h>
21 #include <sof/audio/ipc-config.h>
22 #include <sof/common.h>
23 #include <rtos/panic.h>
24 #include <sof/ipc/msg.h>
25 #include <rtos/timer.h>
26 #include <rtos/alloc.h>
27 #include <rtos/clk.h>
28 #include <rtos/init.h>
29 #include <sof/lib/memory.h>
30 #include <sof/lib/notifier.h>
31 #include <sof/lib/pm_runtime.h>
32 #include <sof/lib/uuid.h>
33 #include <sof/list.h>
34 #include <sof/math/numbers.h>
35 #include <sof/platform.h>
36 #include <sof/schedule/edf_schedule.h>
37 #include <sof/schedule/schedule.h>
38 #include <rtos/task.h>
39 #include <rtos/string.h>
40 #include <sof/ut.h>
41 #include <ipc/topology.h>
42 #include <ipc4/kpb.h>
43 #include <user/kpb.h>
44 #include <user/trace.h>
45 #include <errno.h>
46 #include <limits.h>
47 #include <stdbool.h>
48 #include <stddef.h>
49 #include <stdint.h>
50
51 static const struct comp_driver comp_kpb;
52
53 LOG_MODULE_REGISTER(kpb, CONFIG_SOF_LOG_LEVEL);
54 #if CONFIG_IPC_MAJOR_4
55 /* A8A0CB32-4A77-4DB1-85C7-53D7EE07BCE6 */
56 DECLARE_SOF_RT_UUID("kpb", kpb_uuid, 0xA8A0CB32, 0x4A77, 0x4DB1,
57 0x85, 0xC7, 0x53, 0xD7, 0xEE, 0x07, 0xBC, 0xE6);
58 #else
59 /* d8218443-5ff3-4a4c-b388-6cfe07b9562e */
60 DECLARE_SOF_RT_UUID("kpb", kpb_uuid, 0xd8218443, 0x5ff3, 0x4a4c,
61 0xb3, 0x88, 0x6c, 0xfe, 0x07, 0xb9, 0x56, 0x2e);
62 #endif
63
64 DECLARE_TR_CTX(kpb_tr, SOF_UUID(kpb_uuid), LOG_LEVEL_INFO);
65
66 /* e50057a5-8b27-4db4-bd79-9a639cee5f50 */
67 DECLARE_SOF_UUID("kpb-task", kpb_task_uuid, 0xe50057a5, 0x8b27, 0x4db4,
68 0xbd, 0x79, 0x9a, 0x63, 0x9c, 0xee, 0x5f, 0x50);
69
70 /* KPB private data, runtime data */
/* KPB private data, runtime data */
struct comp_data {
	enum kpb_state state; /**< current state of KPB component */
	uint32_t state_log; /**< keeps record of KPB recent states */
#ifndef __ZEPHYR__
	struct k_spinlock lock; /**< locking mechanism for read pointer calculations */
	k_spinlock_key_t key; /**< key saved by kpb_lock(), consumed by kpb_unlock() */
#else
	struct k_mutex lock; /**< on Zephyr the same data is guarded by a mutex */
#endif
	struct sof_kpb_config config;   /**< component configuration data */
	struct history_data hd; /**< data related to history buffer */
	struct task draining_task; /**< EDF task draining history buffer to host */
	struct draining_data draining_task_data; /**< private data of draining task */
	struct kpb_client clients[KPB_MAX_NO_OF_CLIENTS]; /**< registered clients */
	struct comp_buffer *sel_sink; /**< real time sink (channel selector)*/
	struct comp_buffer *host_sink; /**< draining sink (client) */
	uint32_t kpb_no_of_clients; /**< number of registered clients */
	uint32_t source_period_bytes; /**< source number of period bytes */
	uint32_t sink_period_bytes; /**< sink number of period bytes */
	size_t host_buffer_size; /**< size of host buffer */
	size_t host_period_size; /**< size of history period */
	bool sync_draining_mode; /**< should we synchronize draining with
				  * host?
				  */
	enum comp_copy_type force_copy_type; /**< should we force copy_type on kpb sink? */
#ifdef CONFIG_IPC_MAJOR_4
	struct ipc4_kpb_module_cfg ipc4_cfg; /**< module config received over IPC4 */
#endif /* CONFIG_IPC_MAJOR_4 */
	uint32_t num_of_sel_mic; /**< number of selected microphones (mic-select mode) */
	uint32_t num_of_in_channels; /**< number of channels on KPB input */
	uint32_t offsets[KPB_MAX_MICSEL_CHANNELS]; /**< per-mic sample offsets within a frame */
	struct kpb_micselector_config mic_sel; /**< mic selector configuration */
};
104
105 /*! KPB private functions */
106 static void kpb_event_handler(void *arg, enum notify_id type, void *event_data);
107 static int kpb_register_client(struct comp_data *kpb, struct kpb_client *cli);
108 static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli);
109 static enum task_state kpb_draining_task(void *arg);
110 static int kpb_buffer_data(struct comp_dev *dev,
111 const struct comp_buffer __sparse_cache *source, size_t size);
112 static size_t kpb_allocate_history_buffer(struct comp_data *kpb,
113 size_t hb_size_req);
114 static void kpb_clear_history_buffer(struct history_buffer *buff);
115 static void kpb_free_history_buffer(struct history_buffer *buff);
116 static inline bool kpb_is_sample_width_supported(uint32_t sampling_width);
117 static void kpb_copy_samples(struct comp_buffer __sparse_cache *sink,
118 struct comp_buffer __sparse_cache *source, size_t size,
119 size_t sample_width, uint32_t channels);
120 static void kpb_drain_samples(void *source, struct audio_stream __sparse_cache *sink,
121 size_t size, size_t sample_width);
122 static void kpb_buffer_samples(const struct audio_stream __sparse_cache *source,
123 int offset, void *sink, size_t size,
124 size_t sample_width);
125 static void kpb_reset_history_buffer(struct history_buffer *buff);
126 static inline bool validate_host_params(struct comp_dev *dev,
127 size_t host_period_size,
128 size_t host_buffer_size,
129 size_t hb_size_req);
130 static inline void kpb_change_state(struct comp_data *kpb,
131 enum kpb_state state);
132
/**
 * \brief EDF deadline callback for the KPB draining task.
 * \param[in] data - task private data (unused).
 * \return deadline constant scheduling the task at near-idle priority.
 */
static uint64_t kpb_task_deadline(void *data)
{
	return SOF_TASK_DEADLINE_ALMOST_IDLE;
}
137
#ifdef __ZEPHYR__

/* On Zephyr builds the KPB runtime data is protected by a kernel mutex;
 * kpb_lock() blocks until the mutex is available (K_FOREVER).
 */
static void kpb_lock(struct comp_data *kpb)
{
	k_mutex_lock(&kpb->lock, K_FOREVER);
}

/* Release the mutex taken in kpb_lock(). */
static void kpb_unlock(struct comp_data *kpb)
{
	k_mutex_unlock(&kpb->lock);
}

/* One-time initialization of the mutex; called from kpb_new(). */
static void kpb_lock_init(struct comp_data *kpb)
{
	k_mutex_init(&kpb->lock);
}

#else /* __ZEPHYR__ */

/* On non-Zephyr (XTOS) builds a spinlock is used instead; the key returned
 * by k_spin_lock() is stashed in the component data for the matching unlock.
 */
static void kpb_lock(struct comp_data *kpb)
{
	kpb->key = k_spin_lock(&kpb->lock);
}

/* Release the spinlock using the key saved by kpb_lock(). */
static void kpb_unlock(struct comp_data *kpb)
{
	k_spin_unlock(&kpb->lock, kpb->key);
}

/* One-time initialization of the spinlock; called from kpb_new(). */
static void kpb_lock_init(struct comp_data *kpb)
{
	k_spinlock_init(&kpb->lock);
}

#endif /* __ZEPHYR__ */
173
174 #if CONFIG_IPC_MAJOR_4
175 /**
176 * \brief Set and verify ipc params.
177 * \param[in] dev - component device pointer.
178 * \param[in] ipc_config - ipc config pointer.
179 * \return: none.
180 */
kpb_set_verify_ipc_params(struct comp_dev * dev,const struct ipc4_kpb_module_cfg * ipc_config)181 static int kpb_set_verify_ipc_params(struct comp_dev *dev,
182 const struct ipc4_kpb_module_cfg *ipc_config)
183 {
184 struct comp_data *kpb = comp_get_drvdata(dev);
185
186 kpb->config.channels = ipc_config->base_cfg.audio_fmt.channels_count;
187 kpb->config.sampling_freq =
188 ipc_config->base_cfg.audio_fmt.sampling_frequency;
189 kpb->config.sampling_width =
190 ipc_config->base_cfg.audio_fmt.valid_bit_depth;
191 kpb->ipc4_cfg.base_cfg = ipc_config->base_cfg;
192
193 /* Initialize sinks */
194 kpb->sel_sink = NULL;
195 kpb->host_sink = NULL;
196
197 if (!kpb_is_sample_width_supported(kpb->config.sampling_width)) {
198 comp_err(dev, "kpb_set_verify_ipc_params(): requested sampling width not supported");
199 return -EINVAL;
200 }
201
202 if (kpb->config.channels > KPB_MAX_SUPPORTED_CHANNELS) {
203 comp_err(dev, "kpb_set_verify_ipc_params(): no of channels exceeded the limit");
204 return -EINVAL;
205 }
206
207 if (kpb->config.sampling_freq != KPB_SAMPLNG_FREQUENCY) {
208 comp_err(dev, "kpb_set_verify_ipc_params(): requested sampling frequency not supported");
209 return -EINVAL;
210 }
211
212 return 0;
213 }
214
215 /**
216 * \brief Set KPB component stream params.
217 * \param[in] dev - component device pointer.
218 * \param[in] params - sof ipc stream params pointer.
219 * \return: none.
220 */
kpb_set_params(struct comp_dev * dev,struct sof_ipc_stream_params * params)221 static void kpb_set_params(struct comp_dev *dev,
222 struct sof_ipc_stream_params *params)
223 {
224 struct comp_data *kpb = comp_get_drvdata(dev);
225 uint32_t __sparse_cache valid_fmt, frame_fmt;
226
227 comp_dbg(dev, "kpb_set_params()");
228
229 memset_s(params, sizeof(*params), 0, sizeof(*params));
230 params->channels = kpb->ipc4_cfg.base_cfg.audio_fmt.channels_count;
231 params->rate = kpb->ipc4_cfg.base_cfg.audio_fmt.sampling_frequency;
232 params->sample_container_bytes = kpb->ipc4_cfg.base_cfg.audio_fmt.depth / 8;
233 params->sample_valid_bytes =
234 kpb->ipc4_cfg.base_cfg.audio_fmt.valid_bit_depth / 8;
235 params->buffer_fmt = kpb->ipc4_cfg.base_cfg.audio_fmt.interleaving_style;
236 params->buffer.size = kpb->ipc4_cfg.base_cfg.ibs * KPB_MAX_BUFF_TIME * params->channels;
237
238 params->host_period_bytes = params->channels *
239 params->sample_container_bytes *
240 (params->rate / 1000);
241
242 audio_stream_fmt_conversion(kpb->ipc4_cfg.base_cfg.audio_fmt.depth,
243 kpb->ipc4_cfg.base_cfg.audio_fmt.valid_bit_depth,
244 &frame_fmt, &valid_fmt,
245 kpb->ipc4_cfg.base_cfg.audio_fmt.s_type);
246
247 params->frame_fmt = frame_fmt;
248 }
249
250 /**
251 * \brief Set KPB component stream params.
252 * \param[in] dev - component device pointer.
253 * \param[in] type - sof ipc stream params pointer.
254 * \param[in] value - ipc4 base module config pointer.
255 * \return: none.
256 */
/**
 * \brief Read back a component attribute.
 * \param[in] dev - component device pointer.
 * \param[in] type - attribute identifier.
 * \param[out] value - destination; for COMP_ATTR_BASE_CONFIG it receives
 *                     a struct ipc4_base_module_cfg copy.
 * \return 0 on success, -EINVAL for unsupported attribute types.
 */
static int kpb_get_attribute(struct comp_dev *dev,
			     uint32_t type,
			     void *value)
{
	struct comp_data *kpb = comp_get_drvdata(dev);

	/* Only the base module config is exposed */
	if (type != COMP_ATTR_BASE_CONFIG)
		return -EINVAL;

	*(struct ipc4_base_module_cfg *)value = kpb->ipc4_cfg.base_cfg;

	return 0;
}
273
274 /**
275 * \brief Initialize KPB sinks when binding.
276 * \param[in] dev - component device pointer.
277 * \param[in] data - ipc4 bind/unbind data.
278 * \return: none.
279 */
kpb_bind(struct comp_dev * dev,void * data)280 static int kpb_bind(struct comp_dev *dev, void *data)
281 {
282 struct comp_data *kpb = comp_get_drvdata(dev);
283 struct ipc4_module_bind_unbind *bu;
284 struct list_item *blist;
285 int buf_id;
286 int ret = 0;
287
288 comp_dbg(dev, "kpb_bind()");
289
290 bu = (struct ipc4_module_bind_unbind *)data;
291 buf_id = IPC4_COMP_ID(bu->extension.r.src_queue, bu->extension.r.dst_queue);
292
293 /* We're assuming here that KPB Real Time sink (kpb->sel_sink) is
294 * always connected to input pin of Detector pipeline so during IPC4
295 * Bind operation both src_queue and dst_queue will have id = 0
296 * (Detector/MicSel has one input pin). To properly connect KPB sink
297 * with Detector source we're looking for buffer with id=0.
298 */
299
300 list_for_item(blist, &dev->bsink_list) {
301 struct comp_buffer *sink = container_of(blist, struct comp_buffer, source_list);
302 struct comp_buffer __sparse_cache *sink_c = buffer_acquire(sink);
303 int sink_buf_id;
304
305 if (!sink_c->sink) {
306 ret = -EINVAL;
307 buffer_release(sink_c);
308 break;
309 }
310
311 sink_buf_id = sink_c->id;
312 buffer_release(sink_c);
313
314 if (sink_buf_id == buf_id) {
315 if (sink_buf_id == 0)
316 kpb->sel_sink = sink;
317 else
318 kpb->host_sink = sink;
319 }
320 }
321
322 return ret;
323 }
324
325 /**
326 * \brief Reset KPB sinks when unbinding.
327 * \param[in] dev - component device pointer.
328 * \param[in] data - ipc4 bind/unbind data.
329 * \return: none.
330 */
kpb_unbind(struct comp_dev * dev,void * data)331 static int kpb_unbind(struct comp_dev *dev, void *data)
332 {
333 struct comp_data *kpb = comp_get_drvdata(dev);
334 struct ipc4_module_bind_unbind *bu;
335 int buf_id;
336
337 comp_dbg(dev, "kpb_bind()");
338
339 bu = (struct ipc4_module_bind_unbind *)data;
340 buf_id = IPC4_COMP_ID(bu->extension.r.src_queue, bu->extension.r.dst_queue);
341
342 /* Reset sinks when unbinding */
343 if (buf_id == 0)
344 kpb->sel_sink = NULL;
345 else
346 kpb->host_sink = NULL;
347
348 return 0;
349 }
350
351 #else /* CONFIG_IPC_MAJOR_4 */
352 /**
353 * \brief Set and verify ipc params.
354 * \param[in] dev - component device pointer.
355 * \param[in] ipc_config - ipc config pointer type.
356 * \return: none.
357 */
kpb_set_verify_ipc_params(struct comp_dev * dev,const struct ipc_config_process * ipc_config)358 static int kpb_set_verify_ipc_params(struct comp_dev *dev,
359 const struct ipc_config_process *ipc_config)
360 {
361 struct comp_data *kpb = comp_get_drvdata(dev);
362 int ret;
363
364 ret = memcpy_s(&kpb->config, sizeof(kpb->config), ipc_config->data,
365 ipc_config->size);
366 assert(!ret);
367
368 /* Initialize sinks */
369 kpb->sel_sink = NULL;
370 kpb->host_sink = NULL;
371
372 if (!kpb_is_sample_width_supported(kpb->config.sampling_width)) {
373 comp_err(dev, "kpb_set_verify_ipc_params(): requested sampling width not supported");
374 return -EINVAL;
375 }
376
377 if (kpb->config.channels > KPB_MAX_SUPPORTED_CHANNELS) {
378 comp_err(dev, "kpb_set_verify_ipc_params(): no of channels exceeded the limit");
379 return -EINVAL;
380 }
381
382 if (kpb->config.sampling_freq != KPB_SAMPLNG_FREQUENCY) {
383 comp_err(dev, "kpb_set_verify_ipc_params(): requested sampling frequency not supported");
384 return -EINVAL;
385 }
386
387 return 0;
388 }
389
kpb_set_params(struct comp_dev * dev,struct sof_ipc_stream_params * params)390 static void kpb_set_params(struct comp_dev *dev,
391 struct sof_ipc_stream_params *params)
392 {}
393 #endif /* CONFIG_IPC_MAJOR_4 */
394
395 /*
396 * \brief Create a key phrase buffer component.
397 * \param[in] config - generic ipc component pointer.
398 *
399 * \return: a pointer to newly created KPB component.
400 */
kpb_new(const struct comp_driver * drv,const struct comp_ipc_config * config,const void * spec)401 static struct comp_dev *kpb_new(const struct comp_driver *drv,
402 const struct comp_ipc_config *config,
403 const void *spec)
404 {
405 #if CONFIG_IPC_MAJOR_4
406 const struct ipc4_kpb_module_cfg *ipc_process = spec;
407 size_t ipc_config_size = sizeof(*ipc_process);
408 size_t kpb_config_size = sizeof(struct ipc4_kpb_module_cfg);
409 #else
410 const struct ipc_config_process *ipc_process = spec;
411 size_t ipc_config_size = ipc_process->size;
412 size_t kpb_config_size = sizeof(struct sof_kpb_config);
413 #endif
414 struct task_ops ops = {
415 .run = kpb_draining_task,
416 .get_deadline = kpb_task_deadline,
417 };
418
419 struct comp_dev *dev;
420 struct comp_data *kpb;
421 int ret;
422
423 comp_cl_info(&comp_kpb, "kpb_new()");
424
425 /* make sure data size is not bigger than config space */
426 if (ipc_config_size > kpb_config_size) {
427 comp_cl_err(&comp_kpb, "kpb_new(): ipc config size %u too big",
428 ipc_config_size);
429 return NULL;
430 }
431
432 dev = comp_alloc(drv, sizeof(*dev));
433 if (!dev)
434 return NULL;
435 dev->ipc_config = *config;
436
437 kpb = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM, sizeof(*kpb));
438 if (!kpb) {
439 rfree(dev);
440 return NULL;
441 }
442
443 comp_set_drvdata(dev, kpb);
444
445 ret = kpb_set_verify_ipc_params(dev, ipc_process);
446 if (ret) {
447 rfree(dev);
448 return NULL;
449 }
450
451 kpb_lock_init(kpb);
452
453 /* Initialize draining task */
454 schedule_task_init_edf(&kpb->draining_task, /* task structure */
455 SOF_UUID(kpb_task_uuid), /* task uuid */
456 &ops, /* task ops */
457 &kpb->draining_task_data, /* task private data */
458 0, /* core on which we should run */
459 0); /* no flags */
460
461 /* Init basic component data */
462 kpb->hd.c_hb = NULL;
463 kpb->kpb_no_of_clients = 0;
464 kpb->state_log = 0;
465
466 #ifdef CONFIG_KPB_FORCE_COPY_TYPE_NORMAL
467 kpb->force_copy_type = COMP_COPY_NORMAL;
468 #else
469 kpb->force_copy_type = COMP_COPY_INVALID; /* do not change kpb sink copy type */
470 #endif
471
472 /* Kpb has been created successfully */
473 dev->state = COMP_STATE_READY;
474 kpb_change_state(kpb, KPB_STATE_CREATED);
475
476 return dev;
477 }
478
479 /**
480 * \brief Allocate history buffer.
481 * \param[in] kpb - KPB component data pointer.
482 *
483 * \return: none.
484 */
/**
 * \brief Allocate history buffer as a ring of memory blocks.
 * \param[in] kpb - KPB component data pointer.
 * \param[in] hb_size_req - total number of bytes requested.
 *
 * \return number of bytes actually allocated; may be less than requested
 *         (0 on container allocation failure). Caller compares against
 *         hb_size_req and frees the partial ring on shortfall.
 */
static size_t kpb_allocate_history_buffer(struct comp_data *kpb,
					  size_t hb_size_req)
{
	struct history_buffer *hb;
	struct history_buffer *new_hb = NULL;
	/*! Total allocation size */
	size_t hb_size = hb_size_req;
	/*! Current allocation size */
	size_t ca_size = hb_size;
	/*! Memory caps priorites for history buffer */
	int hb_mcp[KPB_NO_OF_MEM_POOLS] = {SOF_MEM_CAPS_LP, SOF_MEM_CAPS_HP,
					   SOF_MEM_CAPS_RAM };
	void *new_mem_block = NULL;
	size_t temp_ca_size;
	int i = 0; /* index of the memory pool currently being tried */
	size_t allocated_size = 0;

	comp_cl_info(&comp_kpb, "kpb_allocate_history_buffer()");

	/* Initialize history buffer: a single self-linked ring node to start */
	kpb->hd.c_hb = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM,
			       sizeof(struct history_buffer));
	if (!kpb->hd.c_hb)
		return 0;
	kpb->hd.c_hb->next = kpb->hd.c_hb;
	kpb->hd.c_hb->prev = kpb->hd.c_hb;
	hb = kpb->hd.c_hb;

	/* Allocate history buffer/s. KPB history buffer has a size of
	 * KPB_MAX_BUFFER_SIZE, since there is no single memory block
	 * that big, we need to allocate couple smaller blocks which
	 * linked together will form history buffer.
	 */
	while (hb_size > 0 && i < ARRAY_SIZE(hb_mcp)) {
		/* Try to allocate ca_size (current allocation size). At first
		 * attempt it will be equal to hb_size (history buffer size).
		 */
		new_mem_block = rballoc(0, hb_mcp[i], ca_size);

		if (new_mem_block) {
			/* We managed to allocate a block of ca_size.
			 * Now we initialize it.
			 */
			comp_cl_info(&comp_kpb, "kpb new memory block: %d",
				     ca_size);
			allocated_size += ca_size;
			hb->start_addr = new_mem_block;
			hb->end_addr = (char *)new_mem_block +
				ca_size;
			hb->w_ptr = new_mem_block;
			hb->r_ptr = new_mem_block;
			hb->state = KPB_BUFFER_FREE;
			hb_size -= ca_size;
			hb->next = kpb->hd.c_hb;
			/* Do we need another buffer? */
			if (hb_size > 0) {
				/* Yes, we still need at least one more buffer.
				 * Let's first create new container for it.
				 */
				new_hb = rzalloc(SOF_MEM_ZONE_RUNTIME, 0,
						 SOF_MEM_CAPS_RAM,
						 sizeof(struct history_buffer));
				if (!new_hb)
					return 0;
				/* Link the fresh node into the circular list */
				hb->next = new_hb;
				new_hb->next = kpb->hd.c_hb;
				new_hb->state = KPB_BUFFER_OFF;
				new_hb->prev = hb;
				hb = new_hb;
				kpb->hd.c_hb->prev = new_hb;
				/* Ask for everything that is still missing */
				ca_size = hb_size;
				i++;
			}
		} else {
			/* We've failed to allocate ca_size of that hb_mcp
			 * let's try again with some smaller size.
			 * NOTE! If we decrement by some small value,
			 * the allocation will take significant time.
			 * However, bigger values like
			 * HEAP_HP_BUFFER_BLOCK_SIZE will result in lower
			 * accuracy of allocation.
			 */
			temp_ca_size = ca_size - KPB_ALLOCATION_STEP;
			/* Guard against size_t underflow when ca_size < step */
			ca_size = (ca_size < temp_ca_size) ? 0 : temp_ca_size;
			if (ca_size == 0) {
				/* This pool is exhausted; move to the next one */
				ca_size = hb_size;
				i++;
			}
			continue;
		}
	}

	comp_cl_info(&comp_kpb, "kpb_allocate_history_buffer(): allocated %d bytes",
		     allocated_size);

	return allocated_size;
}
582
583 /**
584 * \brief Reclaim memory of a history buffer.
585 * \param[in] buff - pointer to current history buffer.
586 *
587 * \return none.
588 */
kpb_free_history_buffer(struct history_buffer * buff)589 static void kpb_free_history_buffer(struct history_buffer *buff)
590 {
591 struct history_buffer *_buff;
592 struct history_buffer *first_buff = buff;
593
594 comp_cl_info(&comp_kpb, "kpb_free_history_buffer()");
595
596 if (!buff)
597 return;
598
599 /* Free history buffer/s */
600 do {
601 /* First reclaim HB internal memory, then HB itself */
602 if (buff->start_addr)
603 rfree(buff->start_addr);
604
605 _buff = buff->next;
606 rfree(buff);
607 buff = _buff;
608 } while (buff && buff != first_buff);
609 }
610
611 /**
612 * \brief Reclaim memory of a key phrase buffer.
613 * \param[in] dev - component device pointer.
614 *
615 * \return none.
616 */
/**
 * \brief Reclaim memory of a key phrase buffer.
 * \param[in] dev - component device pointer.
 *
 * \return none.
 */
static void kpb_free(struct comp_dev *dev)
{
	struct comp_data *kpb = comp_get_drvdata(dev);

	comp_info(dev, "kpb_free()");

	/* Unregister KPB from notifications first so no event handler can
	 * run against freed state.
	 */
	notifier_unregister(dev, NULL, NOTIFIER_ID_KPB_CLIENT_EVT);

	/* Reclaim memory occupied by history buffer */
	kpb_free_history_buffer(kpb->hd.c_hb);
	kpb->hd.c_hb = NULL;
	kpb->hd.buffer_size = 0;

	/* remove scheduling */
	schedule_task_free(&kpb->draining_task);

	/* change state */
	kpb_change_state(kpb, KPB_STATE_DISABLED);

	/* Free KPB private data, then the device itself */
	rfree(kpb);
	rfree(dev);
}
641
642 /**
643 * \brief Trigger a change of KPB state.
644 * \param[in] dev - component device pointer.
645 * \param[in] cmd - command type.
646 * \return none.
647 */
/**
 * \brief Trigger a change of KPB state.
 * \param[in] dev - component device pointer.
 * \param[in] cmd - command type.
 * \return result of the generic component state transition.
 */
static int kpb_trigger(struct comp_dev *dev, int cmd)
{
	comp_info(dev, "kpb_trigger()");

	/* No KPB-specific handling; delegate to the generic state machine */
	return comp_set_state(dev, cmd);
}
654
/**
 * \brief Verify pcm stream params against component capabilities.
 * \param[in] dev - component device pointer.
 * \param[in] params - stream params to check.
 * \return 0 on success, negative error from comp_verify_params() otherwise.
 *
 * NOTE: the "kbp" in the name is a historical typo kept for the caller.
 */
static int kbp_verify_params(struct comp_dev *dev,
			     struct sof_ipc_stream_params *params)
{
	int err;

	comp_dbg(dev, "kbp_verify_params()");

	err = comp_verify_params(dev, 0, params);
	if (err >= 0)
		return 0;

	comp_err(dev, "kpb_verify_params(): comp_verify_params() failed");
	return err;
}
670
671 /**
672 * \brief KPB params.
673 * \param[in] dev - component device pointer.
674 * \param[in] params - pcm params.
675 * \return none.
676 */
kpb_params(struct comp_dev * dev,struct sof_ipc_stream_params * params)677 static int kpb_params(struct comp_dev *dev,
678 struct sof_ipc_stream_params *params)
679 {
680 struct comp_data *kpb = comp_get_drvdata(dev);
681 int err;
682
683 if (dev->state == COMP_STATE_PREPARE) {
684 comp_err(dev, "kpb_params(): kpb has been already configured.");
685 return PPL_STATUS_PATH_STOP;
686 }
687
688 kpb_set_params(dev, params);
689
690 err = kbp_verify_params(dev, params);
691 if (err < 0) {
692 comp_err(dev, "kpb_params(): pcm params verification failed");
693 return -EINVAL;
694 }
695
696 kpb->host_buffer_size = params->buffer.size;
697 kpb->host_period_size = params->host_period_bytes;
698 kpb->config.sampling_width = params->sample_container_bytes * 8;
699
700 return 0;
701 }
702
703 /**
704 * \brief Prepare key phrase buffer.
705 * \param[in] dev - kpb component device pointer.
706 *
707 * \return integer representing either:
708 * 0 -> success
709 * -EINVAL -> failure.
710 */
/**
 * \brief Prepare key phrase buffer.
 * \param[in] dev - kpb component device pointer.
 *
 * \return integer representing either:
 *	0 -> success
 *	-EBUSY -> reset still in progress
 *	-EINVAL -> bad host params or history buffer allocation shortfall
 *	-ENOMEM -> notifier registration failure
 *	-EIO -> real-time sink not found
 */
static int kpb_prepare(struct comp_dev *dev)
{
	struct comp_data *kpb = comp_get_drvdata(dev);
	int ret = 0;
	int i;
	size_t hb_size_req = KPB_MAX_BUFFER_SIZE(kpb->config.sampling_width, kpb->config.channels);

	comp_dbg(dev, "kpb_prepare()");

	/* Cannot prepare while a reset is still being finished */
	if (kpb->state == KPB_STATE_RESETTING ||
	    kpb->state == KPB_STATE_RESET_FINISHING) {
		comp_cl_err(&comp_kpb, "kpb_prepare(): can not prepare KPB due to ongoing reset, state log %x",
			    kpb->state_log);
		return -EBUSY;
	}

	ret = comp_set_state(dev, COMP_TRIGGER_PREPARE);
	if (ret < 0)
		return ret;

	if (ret == COMP_STATUS_STATE_ALREADY_SET)
		return PPL_STATUS_PATH_STOP;

	/* Host period/buffer sizes must be coherent with the requested
	 * history buffer size before any allocation happens.
	 */
	if (!validate_host_params(dev, kpb->host_period_size,
				  kpb->host_buffer_size, hb_size_req)) {
		return -EINVAL;
	}

	kpb_change_state(kpb, KPB_STATE_PREPARING);

	/* Init private data */
	kpb->kpb_no_of_clients = 0;
	kpb->hd.buffered = 0;

	if (kpb->hd.c_hb && kpb->hd.buffer_size < hb_size_req) {
		/* Host params has changed, we need to allocate new buffer */
		kpb_free_history_buffer(kpb->hd.c_hb);
		kpb->hd.c_hb = NULL;
	}

	if (!kpb->hd.c_hb) {
		/* Allocate history buffer */
		kpb->hd.buffer_size = kpb_allocate_history_buffer(kpb,
								  hb_size_req);

		/* Have we allocated what we requested? */
		if (kpb->hd.buffer_size < hb_size_req) {
			comp_cl_err(&comp_kpb, "kpb_prepare(): failed to allocate space for KPB buffer");
			kpb_free_history_buffer(kpb->hd.c_hb);
			kpb->hd.c_hb = NULL;
			kpb->hd.buffer_size = 0;
			return -EINVAL;
		}
	}
	/* Init history buffer */
	kpb_reset_history_buffer(kpb->hd.c_hb);
	kpb->hd.free = kpb->hd.buffer_size;

	/* Initialize clients data */
	for (i = 0; i < KPB_MAX_NO_OF_CLIENTS; i++) {
		kpb->clients[i].state = KPB_CLIENT_UNREGISTERED;
		kpb->clients[i].r_ptr = NULL;
	}

	/* Register KPB for notification */
	ret = notifier_register(dev, NULL, NOTIFIER_ID_KPB_CLIENT_EVT,
				kpb_event_handler, 0);
	if (ret < 0) {
		kpb_free_history_buffer(kpb->hd.c_hb);
		kpb->hd.c_hb = NULL;
		return -ENOMEM;
	}

#ifndef CONFIG_IPC_MAJOR_4
	/* Search for KPB related sinks.
	 * NOTE! We assume here that channel selector component device
	 * is connected to the KPB sinks as well as host device.
	 */
	struct list_item *blist;

	list_for_item(blist, &dev->bsink_list) {
		struct comp_buffer *sink = container_of(blist, struct comp_buffer, source_list);
		struct comp_buffer __sparse_cache *sink_c = buffer_acquire(sink);
		enum sof_comp_type type;

		if (!sink_c->sink) {
			ret = -EINVAL;
			buffer_release(sink_c);
			break;
		}

		type = dev_comp_type(sink_c->sink);
		buffer_release(sink_c);

		switch (type) {
		case SOF_COMP_SELECTOR:
			/* We found proper real time sink */
			kpb->sel_sink = sink;
			break;
		case SOF_COMP_HOST:
			/* We found proper host sink */
			kpb->host_sink = sink;
			break;
		default:
			break;
		}
	}
#else
	/* Update number of sel_sink channels.
	 * If OBS is not equal to IBS it means that KPB will work in micselector mode.
	 */
	if (kpb->ipc4_cfg.base_cfg.ibs != kpb->ipc4_cfg.base_cfg.obs) {
		struct list_item *sink_list;
		const uint32_t byte_align = 1;
		const uint32_t frame_align_req = 1;
		uint32_t sink_id;

		list_for_item(sink_list, &dev->bsink_list) {
			struct comp_buffer *sink =
				container_of(sink_list, struct comp_buffer, source_list);
			struct comp_buffer __sparse_cache *sink_c = buffer_acquire(sink);

			audio_stream_init_alignment_constants(byte_align, frame_align_req,
							      &sink_c->stream);
			sink_id = sink_c->id;

			/* Sink 0 (real-time path) carries only the selected
			 * mics; any other sink keeps the full channel count.
			 */
			if (sink_id == 0)
				sink_c->stream.channels = kpb->num_of_sel_mic;
			else
				sink_c->stream.channels = kpb->config.channels;

			buffer_release(sink_c);
		}
	}
#endif /* CONFIG_IPC_MAJOR_4 */

	/* A real-time sink is mandatory; on IPC4 it is set in kpb_bind() */
	if (!kpb->sel_sink) {
		comp_err(dev, "kpb_prepare(): could not find sink: sel_sink %p",
			 kpb->sel_sink);
		ret = -EIO;
	}

	kpb->sync_draining_mode = true;

	kpb_change_state(kpb, KPB_STATE_RUN);

	return ret;
}
859
860 /**
861 * \brief Used to pass standard and bespoke commands (with data) to component.
862 * \param[in,out] dev - Volume base component device.
863 * \param[in] cmd - Command type.
864 * \param[in,out] data - Control command data.
865 * \return Error code.
866 */
/**
 * \brief Used to pass standard and bespoke commands (with data) to component.
 * \param[in,out] dev - Volume base component device.
 * \param[in] cmd - Command type.
 * \param[in,out] data - Control command data.
 * \param[in] max_data_size - Size limit of the command data.
 * \return Error code (always 0 - KPB handles no commands; stub required
 *         by the component driver interface).
 */
static int kpb_cmd(struct comp_dev *dev, int cmd, void *data,
		   int max_data_size)
{
	return 0;
}
872
873 /**
874 * \brief Resets KPB component.
875 * \param[in,out] dev KPB base component device.
876 * \return Error code.
877 */
/**
 * \brief Resets KPB component.
 * \param[in,out] dev KPB base component device.
 * \return Error code (-EBUSY while buffering/draining is being terminated).
 */
static int kpb_reset(struct comp_dev *dev)
{
	struct comp_data *kpb = comp_get_drvdata(dev);
	int ret = 0;
	int i;

	comp_cl_info(&comp_kpb, "kpb_reset(): resetting from state %d, state log %x",
		     kpb->state, kpb->state_log);

	switch (kpb->state) {
	case KPB_STATE_BUFFERING:
	case KPB_STATE_DRAINING:
		/* KPB is performing some task now,
		 * terminate it gently.
		 */
		kpb_change_state(kpb, KPB_STATE_RESETTING);
		ret = -EBUSY;
		break;
	case KPB_STATE_DISABLED:
	case KPB_STATE_CREATED:
		/* Nothing to reset */
		ret = comp_set_state(dev, COMP_TRIGGER_RESET);
		break;
	default:
		/* Full reset: drop bookkeeping, detach sinks and clients */
		kpb->hd.buffered = 0;
		kpb->sel_sink = NULL;
		kpb->host_sink = NULL;
		kpb->host_buffer_size = 0;
		kpb->host_period_size = 0;

		for (i = 0; i < KPB_MAX_NO_OF_CLIENTS; i++) {
			kpb->clients[i].state = KPB_CLIENT_UNREGISTERED;
			kpb->clients[i].r_ptr = NULL;
		}

		if (kpb->hd.c_hb) {
			/* Reset history buffer - zero its data, reset pointers
			 * and states.
			 */
			kpb_reset_history_buffer(kpb->hd.c_hb);
		}

		/* Unregister KPB from notifications */
		notifier_unregister(dev, NULL, NOTIFIER_ID_KPB_CLIENT_EVT);
		/* Finally KPB is ready after reset */
		kpb_change_state(kpb, KPB_STATE_PREPARING);

		ret = comp_set_state(dev, COMP_TRIGGER_RESET);
		break;
	}

	return ret;
}
931
932 #ifdef KPB_HIFI3
933 #if CONFIG_FORMAT_S16LE
/**
 * \brief HiFi3-accelerated 16-bit mic-select copy.
 *
 * Copies \p size bytes worth of selected-channel samples from \p source to
 * \p sink, picking input channel offsets[ch] for each of the
 * \p micsel_channels output channels out of \p in_channels input channels.
 * Output stores use Xtensa circular addressing (XC ops) so the write
 * pointer wraps automatically within [ostream->addr, ostream->end_addr).
 */
static void kpb_micselect_copy16(struct comp_buffer __sparse_cache *sink,
				 struct comp_buffer __sparse_cache *source, size_t size,
				 uint32_t in_channels, uint32_t micsel_channels, uint32_t *offsets)
{
	struct audio_stream __sparse_cache *istream = &source->stream;
	struct audio_stream __sparse_cache *ostream = &sink->stream;
	uint16_t ch;
	size_t i;

	/* Configure the circular-buffer region used by AE_S16_0_XC below */
	AE_SETCBEGIN0(ostream->addr);
	AE_SETCEND0(ostream->end_addr);

	buffer_stream_invalidate(source, size);
	const ae_int16 *in_ptr = (const ae_int16 *)istream->r_ptr;
	ae_int16x4 d16 = AE_ZERO16();
	/* Strides in bytes: input advances by one full input frame, output
	 * by one selected-channel frame.
	 */
	const size_t in_offset = in_channels * sizeof(ae_int16);
	const size_t out_offset = micsel_channels * sizeof(ae_int16);
	const size_t samples_per_chan = size / (sizeof(uint16_t) * micsel_channels);
	ae_int16 *out_ptr;

	/* Channel-major copy: one full pass over the frames per output channel */
	for (ch = 0; ch < micsel_channels; ch++) {
		const ae_int16 *input_data = (const ae_int16 *)(in_ptr) + offsets[ch];

		out_ptr = (ae_int16 *)ostream->w_ptr;
		out_ptr += ch;
		for (i = 0; i < samples_per_chan; i++) {
			AE_L16_XP(d16, input_data, in_offset);
			AE_S16_0_XC(d16, out_ptr, out_offset);
		}
	}
}
965 #endif
966 #if CONFIG_FORMAT_S24LE || CONFIG_FORMAT_S32LE
/**
 * \brief HiFi3-accelerated 32-bit mic-select copy.
 *
 * 32-bit counterpart of kpb_micselect_copy16(): copies the selected
 * input channels (offsets[ch]) into a packed micsel_channels-wide output,
 * using Xtensa circular addressing for automatic output wrap-around.
 */
static void kpb_micselect_copy32(struct comp_buffer __sparse_cache *sink,
				 struct comp_buffer __sparse_cache *source, size_t size,
				 uint32_t in_channels, uint32_t micsel_channels, uint32_t *offsets)
{
	struct audio_stream __sparse_cache *istream = &source->stream;
	struct audio_stream __sparse_cache *ostream = &sink->stream;
	uint16_t ch;
	size_t i;

	/* Configure the circular-buffer region used by AE_S32_L_XC below */
	AE_SETCBEGIN0(ostream->addr);
	AE_SETCEND0(ostream->end_addr);

	buffer_stream_invalidate(source, size);

	const ae_int32 *in_ptr = (const ae_int32 *)istream->r_ptr;
	ae_int32x2 d32 = AE_ZERO32();
	/* Strides in bytes: input advances by one full input frame, output
	 * by one selected-channel frame.
	 */
	const size_t in_offset = in_channels * sizeof(ae_int32);
	const size_t out_offset = micsel_channels * sizeof(ae_int32);
	const size_t samples_per_chan = size / (sizeof(uint32_t) * micsel_channels);
	ae_int32 *out_ptr;

	/* Channel-major copy: one full pass over the frames per output channel */
	for (ch = 0; ch < micsel_channels; ch++) {
		const ae_int32 *input_data = (const ae_int32 *)(in_ptr) + offsets[ch];

		out_ptr = (ae_int32 *)ostream->w_ptr;
		out_ptr += ch;
		for (i = 0; i < samples_per_chan; i++) {
			AE_L32_XP(d32, input_data, in_offset);
			AE_S32_L_XC(d32, out_ptr, out_offset);
		}
	}
}
999 #endif
1000 #else
/**
 * \brief Generic (non-HiFi3) 16-bit mic-select copy.
 *
 * For each of the micsel_channels output channels, copies the input
 * channel at offsets[ch] out of frames that are in_channels samples wide,
 * packing the result micsel_channels-wide at the sink write pointer.
 * The output pointer wraps back to ostream->addr when it would cross
 * ostream->end_addr.
 */
static void kpb_micselect_copy16(struct comp_buffer __sparse_cache *sink,
				 struct comp_buffer __sparse_cache *source, size_t size,
				 uint32_t in_channels, uint32_t micsel_channels, uint32_t *offsets)
{
	struct audio_stream __sparse_cache *in_stream = &source->stream;
	struct audio_stream __sparse_cache *out_stream = &sink->stream;
	const uint32_t frames = size / (sizeof(uint16_t) * micsel_channels);
	uint16_t ch;

	buffer_stream_invalidate(source, size);

	/* Channel-major: one pass over all frames per selected channel */
	for (ch = 0; ch < micsel_channels; ch++) {
		const int16_t *src = (const int16_t *)in_stream->r_ptr + offsets[ch];
		int16_t *dst = (int16_t *)out_stream->w_ptr;
		size_t dst_idx = ch;

		for (uint32_t f = 0; f < frames; f++) {
			/* Wrap the output back to the buffer start if needed */
			if (&dst[dst_idx] >= (int16_t *)out_stream->end_addr) {
				dst = (int16_t *)out_stream->addr;
				dst_idx = ch;
			}
			dst[dst_idx] = src[f * in_channels];
			dst_idx += micsel_channels;
		}
	}
}
1032
/* De-interleave the selected microphones from a 32-bit-container interleaved
 * source into the sink's circular buffer (generic C path).
 *
 * \param sink		    output buffer, written at w_ptr with manual wrap.
 * \param source	    input buffer with in_channels interleaved channels.
 * \param size		    number of output bytes to produce.
 * \param in_channels	    channel count of the source stream.
 * \param micsel_channels   number of selected microphones (output channels).
 * \param offsets	    per-output-channel source channel index.
 */
static void kpb_micselect_copy32(struct comp_buffer __sparse_cache *sink,
				 struct comp_buffer __sparse_cache *source, size_t size,
				 uint32_t in_channels, uint32_t micsel_channels, uint32_t *offsets)
{
	struct audio_stream __sparse_cache *in_strm = &source->stream;
	struct audio_stream __sparse_cache *out_strm = &sink->stream;
	const uint32_t frames = size / (sizeof(uint32_t) * micsel_channels);
	const int32_t *src;
	int32_t *dst;
	size_t dst_idx;
	uint16_t mic;

	buffer_stream_invalidate(source, size);

	/* Copy one selected channel per pass over the input frames. */
	for (mic = 0; mic < micsel_channels; mic++) {
		src = (int32_t *)in_strm->r_ptr;
		dst = (int32_t *)out_strm->w_ptr;
		dst_idx = 0;

		for (size_t frame = 0; frame < frames; frame++) {
			const size_t src_idx = frame * in_channels + offsets[mic];

			/* Wrap the write position back to the start of the
			 * sink buffer when its end is reached.
			 */
			if (&dst[dst_idx + mic] >= (int32_t *)out_strm->end_addr) {
				dst = (int32_t *)out_strm->addr;
				dst_idx = 0;
			}
			dst[dst_idx + mic] = src[src_idx];
			dst_idx += micsel_channels;
		}
	}
}
1063 #endif
/* Dispatch the mic-select copy to the implementation matching the configured
 * sample width. 24-bit samples travel in 32-bit containers, so both widths
 * share the 32-bit copy routine.
 *
 * \param dev		KPB component device.
 * \param sink_c	acquired sink buffer.
 * \param source_c	acquired source buffer.
 * \param copy_bytes	number of output bytes to produce.
 * \param channels	source channel count.
 */
static void kpb_micselect_copy(struct comp_dev *dev, struct comp_buffer __sparse_cache *sink_c,
			       struct comp_buffer __sparse_cache *source_c, size_t copy_bytes,
			       uint32_t channels)
{
	struct comp_data *kpb = comp_get_drvdata(dev);
	uint32_t *offsets = kpb->offsets;

	switch (kpb->config.sampling_width) {
#if CONFIG_FORMAT_S16LE
	case 16:
		kpb_micselect_copy16(sink_c, source_c, copy_bytes,
				     channels, kpb->num_of_sel_mic, offsets);
		break;
#endif /* CONFIG_FORMAT_S16LE */
#if CONFIG_FORMAT_S24LE || CONFIG_FORMAT_S32LE
	case 24:
		/* fallthrough: 24-bit samples use 32-bit containers */
	case 32:
		kpb_micselect_copy32(sink_c, source_c, copy_bytes,
				     channels, kpb->num_of_sel_mic, offsets);
		break;
#endif /* CONFIG_FORMAT_S24LE || CONFIG_FORMAT_S32LE */
	default:
		comp_cl_err(&comp_kpb, "KPB: An attempt to copy not supported format!");
		return;
	}
}
1094 /**
1095 * \brief Copy real time input stream into sink buffer,
1096 * and in the same time buffers that input for
1097 * later use by some of clients.
1098 *
1099 *\param[in] dev - kpb component device pointer.
1100 *
1101 * \return integer representing either:
1102 * 0 - success
1103 * -EINVAL - failure.
1104 */
static int kpb_copy(struct comp_dev *dev)
{
	int ret = 0;
	struct comp_data *kpb = comp_get_drvdata(dev);
	struct comp_buffer *source, *sink;
	/* sink_c stays NULL on paths that never acquire a sink, so the
	 * release at "out:" can be conditional.
	 */
	struct comp_buffer __sparse_cache *source_c, *sink_c = NULL;
	size_t copy_bytes = 0, produced_bytes = 0;
	size_t sample_width = kpb->config.sampling_width;
	struct draining_data *dd = &kpb->draining_task_data;
	uint32_t avail_bytes;
	uint32_t channels = kpb->config.channels;

	comp_dbg(dev, "kpb_copy()");

	if (list_is_empty(&dev->bsource_list)) {
		comp_err(dev, "kpb_copy(): no source.");
		return -EINVAL;
	}

	/* Get source and sink buffers */
	source = list_first_item(&dev->bsource_list, struct comp_buffer,
				 sink_list);

	source_c = buffer_acquire(source);

	/* Validate source */
	if (!source_c->stream.r_ptr) {
		comp_err(dev, "kpb_copy(): invalid source pointers.");
		ret = -EINVAL;
		goto out;
	}

	/* Behavior depends on the KPB state machine: RUN feeds the selector
	 * sink and the internal history buffer, HOST_COPY feeds the host,
	 * DRAINING states only accumulate into the history buffer.
	 */
	switch (kpb->state) {
	case KPB_STATE_RUN:
		/* In normal RUN state we simply copy to our sink. */
		sink = kpb->sel_sink;
		ret = PPL_STATUS_PATH_STOP;

		if (!sink) {
			comp_err(dev, "kpb_copy(): no sink.");
			ret = -EINVAL;
			break;
		}

		sink_c = buffer_acquire(sink);

		/* Validate sink */
		if (!sink_c->stream.w_ptr) {
			comp_err(dev, "kpb_copy(): invalid selector sink pointers.");
			ret = -EINVAL;
			break;
		}

		copy_bytes = audio_stream_get_copy_bytes(&source_c->stream, &sink_c->stream);
		if (!copy_bytes) {
			comp_err(dev, "kpb_copy(): nothing to copy sink->free %d source->avail %d",
				 audio_stream_get_free_bytes(&sink_c->stream),
				 audio_stream_get_avail_bytes(&source_c->stream));
			ret = PPL_STATUS_PATH_STOP;
			break;
		}

		if (kpb->num_of_sel_mic == 0) {
			/* No mic selection: plain 1:1 copy. */
			kpb_copy_samples(sink_c, source_c, copy_bytes, sample_width, channels);
		} else {
			/* Mic selection: the sink carries fewer channels than
			 * the source, so scale the sink's free space back to
			 * source bytes and round both sizes down to whole
			 * frames of their respective channel counts.
			 */
			uint32_t avail = audio_stream_get_avail_bytes(&source_c->stream);
			uint32_t free = audio_stream_get_free_bytes(&sink_c->stream);

			copy_bytes = MIN(avail, free * channels / kpb->num_of_sel_mic);
			copy_bytes = ROUND_DOWN(copy_bytes, (sample_width >> 3) * channels);
			unsigned int total_bytes_per_sample =
				(sample_width >> 3) * kpb->num_of_sel_mic;

			produced_bytes = copy_bytes * kpb->num_of_sel_mic / channels;
			produced_bytes = ROUND_DOWN(produced_bytes, total_bytes_per_sample);
			if (!copy_bytes) {
				comp_err(dev, "kpb_copy(): nothing to copy sink->free %d source->avail %d",
					 free,
					 avail);
				ret = PPL_STATUS_PATH_STOP;
				break;
			}
			kpb_micselect_copy(dev, sink_c, source_c, produced_bytes, channels);
		}
		/* Buffer source data internally in history buffer for future
		 * use by clients.
		 */
		if (copy_bytes <= kpb->hd.buffer_size) {
			ret = kpb_buffer_data(dev, source_c, copy_bytes);

			if (ret) {
				comp_err(dev, "kpb_copy(): internal buffering failed.");
				break;
			}
			ret = PPL_STATUS_PATH_STOP;

			/* Update buffered size. NOTE! We only record buffered
			 * data up to the size of history buffer.
			 */
			kpb->hd.buffered += MIN(kpb->hd.buffer_size -
						kpb->hd.buffered,
						copy_bytes);
		} else {
			comp_err(dev, "kpb_copy(): too much data to buffer.");
		}

		/* With mic selection active the sink received fewer bytes
		 * than were consumed from the source.
		 */
		if (kpb->num_of_sel_mic == 0)
			comp_update_buffer_produce(sink_c, copy_bytes);
		else
			comp_update_buffer_produce(sink_c, produced_bytes);

		comp_update_buffer_consume(source_c, copy_bytes);

		break;
	case KPB_STATE_HOST_COPY:
		/* In host copy state we only copy to host buffer. */
		sink = kpb->host_sink;

		if (!sink) {
			comp_err(dev, "kpb_copy(): no sink.");
			ret = -EINVAL;
			break;
		}

		sink_c = buffer_acquire(sink);

		/* Validate sink */
		if (!sink_c->stream.w_ptr) {
			comp_err(dev, "kpb_copy(): invalid host sink pointers.");
			ret = -EINVAL;
			break;
		}

		copy_bytes = audio_stream_get_copy_bytes(&source_c->stream, &sink_c->stream);
		if (!copy_bytes) {
			comp_err(dev, "kpb_copy(): nothing to copy sink->free %d source->avail %d",
				 audio_stream_get_free_bytes(&sink_c->stream),
				 audio_stream_get_avail_bytes(&source_c->stream));
			/* NOTE! We should stop further pipeline copy due to
			 * no data availability however due to HW bug
			 * (no HOST DMA IRQs) we need to call host copy
			 * anyway so it can update its pointers.
			 */
			break;
		}

		kpb_copy_samples(sink_c, source_c, copy_bytes, sample_width, channels);

		comp_update_buffer_produce(sink_c, copy_bytes);
		comp_update_buffer_consume(source_c, copy_bytes);

		break;
	case KPB_STATE_INIT_DRAINING:
	case KPB_STATE_DRAINING:
		/* In draining and init draining we only buffer data in
		 * the internal history buffer.
		 */
		avail_bytes = audio_stream_get_avail_bytes(&source_c->stream);
		/* hd.free caps buffering so new data cannot overwrite the
		 * region staged for draining (set by kpb_init_draining()).
		 */
		copy_bytes = MIN(avail_bytes, kpb->hd.free);
		ret = PPL_STATUS_PATH_STOP;
		if (copy_bytes) {
			buffer_stream_invalidate(source_c, copy_bytes);
			ret = kpb_buffer_data(dev, source_c, copy_bytes);
			/* Tell the draining task how much fresh real-time
			 * data arrived while it was draining.
			 */
			dd->buffered_while_draining += copy_bytes;
			kpb->hd.free -= copy_bytes;

			if (ret) {
				comp_err(dev, "kpb_copy(): internal buffering failed.");
				break;
			}

			comp_update_buffer_consume(source_c, copy_bytes);
		} else {
			comp_warn(dev, "kpb_copy(): buffering skipped (no data to copy, avail %d, free %d",
				  audio_stream_get_avail_bytes(&source_c->stream),
				  kpb->hd.free);
		}

		break;
	default:
		comp_cl_err(&comp_kpb, "kpb_copy(): wrong state (state %d, state log %x)",
			    kpb->state, kpb->state_log);
		ret = -EIO;
		break;
	}

out:
	if (sink_c)
		buffer_release(sink_c);
	buffer_release(source_c);

	return ret;
}
1298
1299 /**
1300 * \brief Buffer real time data stream in
1301 * the internal buffer.
1302 *
1303 * \param[in] dev - KPB component data pointer.
1304 * \param[in] source - pointer to the buffer source.
1305 *
1306 */
static int kpb_buffer_data(struct comp_dev *dev,
			   const struct comp_buffer __sparse_cache *source, size_t size)
{
	int ret = 0;
	size_t size_to_copy = size;
	size_t space_avail;
	struct comp_data *kpb = comp_get_drvdata(dev);
	/* Current write buffer of the (possibly chained) history buffers. */
	struct history_buffer *buff = kpb->hd.c_hb;
	/* Read offset into the source stream, advanced as we spill into
	 * consecutive history buffers.
	 */
	uint32_t offset = 0;
	uint64_t timeout = 0;
	uint64_t current_time;
	/* Buffering temporarily overrides the state; restore it on exit. */
	enum kpb_state state_preserved = kpb->state;
	size_t sample_width = kpb->config.sampling_width;

	comp_dbg(dev, "kpb_buffer_data()");

	/* We are allowed to buffer data in internal history buffer
	 * only in KPB_STATE_RUN, KPB_STATE_DRAINING or KPB_STATE_INIT_DRAINING
	 * states.
	 */
	if (kpb->state != KPB_STATE_RUN &&
	    kpb->state != KPB_STATE_DRAINING &&
	    kpb->state != KPB_STATE_INIT_DRAINING) {
		comp_err(dev, "kpb_buffer_data(): wrong state! (current state %d, state log %x)",
			 kpb->state, kpb->state_log);
		return PPL_STATUS_PATH_STOP;
	}

	kpb_change_state(kpb, KPB_STATE_BUFFERING);

	/* Watchdog: give up if buffering takes longer than 1 ms. */
	timeout = sof_cycle_get_64() + k_ms_to_cyc_ceil64(1);
	/* Let's store audio stream data in internal history buffer */
	while (size_to_copy) {
		/* Reset was requested, it's time to stop buffering and finish
		 * KPB reset.
		 */
		if (kpb->state == KPB_STATE_RESETTING) {
			kpb_change_state(kpb, KPB_STATE_RESET_FINISHING);
			kpb_reset(dev);
			return PPL_STATUS_PATH_STOP;
		}

		/* Are we stuck in buffering? */
		current_time = sof_cycle_get_64();
		if (timeout < current_time) {
			/* Reuse 'timeout' to hold the overshoot in ms purely
			 * for the error log below.
			 */
			timeout = k_cyc_to_ms_near64(current_time - timeout);
			if (timeout <= UINT_MAX)
				comp_err(dev,
					 "kpb_buffer_data(): timeout of %u [ms] (current state %d, state log %x)",
					 (unsigned int)(timeout), kpb->state,
					 kpb->state_log);
			else
				comp_err(dev,
					 "kpb_buffer_data(): timeout > %u [ms] (current state %d, state log %x)",
					 UINT_MAX, kpb->state,
					 kpb->state_log);
			return -ETIME;
		}

		/* Check how much space there is in current write buffer */
		space_avail = (uintptr_t)buff->end_addr - (uintptr_t)buff->w_ptr;

		if (size_to_copy > space_avail) {
			/* We have more data to copy than available space
			 * in this buffer, copy what's available and continue
			 * with next buffer.
			 */
			kpb_buffer_samples(&source->stream, offset, buff->w_ptr,
					   space_avail, sample_width);
			/* Update write pointer & requested copy size */
			buff->w_ptr = (char *)buff->w_ptr + space_avail;
			size_to_copy = size_to_copy - space_avail;
			/* Update read pointer's offset before continuing
			 * with next buffer.
			 */
			offset += space_avail;
		} else {
			/* Requested size is smaller or equal to the space
			 * available in this buffer. In this scenario simply
			 * copy what was requested.
			 */
			kpb_buffer_samples(&source->stream, offset, buff->w_ptr,
					   size_to_copy, sample_width);
			/* Update write pointer & requested copy size */
			buff->w_ptr = (char *)buff->w_ptr + size_to_copy;
			/* Reset requested copy size */
			size_to_copy = 0;
		}
		/* Have we filled whole buffer? */
		if (buff->w_ptr == buff->end_addr) {
			/* Reset write pointer back to the beginning
			 * of the buffer.
			 */
			buff->w_ptr = buff->start_addr;
			/* If we have more buffers use them */
			if (buff->next && buff->next != buff) {
				/* Mark current buffer FULL */
				buff->state = KPB_BUFFER_FULL;
				/* Use next buffer available on the list
				 * of buffers.
				 */
				buff = buff->next;
				/* Update also component container,
				 * so next time we enter buffering function
				 * we will know right away what is the current
				 * write buffer
				 */
				kpb->hd.c_hb = buff;
			}
			/* Mark buffer as FREE */
			buff->state = KPB_BUFFER_FREE;
		}
	}

	kpb_change_state(kpb, state_preserved);
	return ret;
}
1424
1425 /**
1426 * \brief Main event dispatcher.
1427 * \param[in] arg - KPB component internal data.
1428 * \param[in] type - notification type
1429 * \param[in] event_data - event specific data.
1430 * \return none.
1431 */
kpb_event_handler(void * arg,enum notify_id type,void * event_data)1432 static void kpb_event_handler(void *arg, enum notify_id type, void *event_data)
1433 {
1434 struct comp_dev *dev = arg;
1435 struct comp_data *kpb = comp_get_drvdata(dev);
1436 struct kpb_event_data *evd = event_data;
1437 struct kpb_client *cli = evd->client_data;
1438
1439 comp_info(dev, "kpb_event_handler(): received event with ID: %d ",
1440 evd->event_id);
1441
1442 switch (evd->event_id) {
1443 case KPB_EVENT_REGISTER_CLIENT:
1444 kpb_register_client(kpb, cli);
1445 break;
1446 case KPB_EVENT_UNREGISTER_CLIENT:
1447 /*TODO*/
1448 break;
1449 case KPB_EVENT_BEGIN_DRAINING:
1450 kpb_init_draining(dev, cli);
1451 break;
1452 case KPB_EVENT_STOP_DRAINING:
1453 /*TODO*/
1454 break;
1455 default:
1456 comp_err(dev, "kpb_cmd(): unsupported command");
1457 break;
1458 }
1459 }
1460
1461 /**
1462 * \brief Register clients in the system.
1463 *
1464 * \param[in] kpb - kpb device component pointer.
1465 * \param[in] cli - pointer to KPB client's data.
1466 *
1467 * \return integer representing either:
1468 * 0 - success
1469 * -EINVAL - failure.
1470 */
kpb_register_client(struct comp_data * kpb,struct kpb_client * cli)1471 static int kpb_register_client(struct comp_data *kpb, struct kpb_client *cli)
1472 {
1473 int ret = 0;
1474
1475 comp_cl_info(&comp_kpb, "kpb_register_client()");
1476
1477 if (!cli) {
1478 comp_cl_err(&comp_kpb, "kpb_register_client(): no client data");
1479 return -EINVAL;
1480 }
1481 /* Do we have a room for a new client? */
1482 if (kpb->kpb_no_of_clients >= KPB_MAX_NO_OF_CLIENTS ||
1483 cli->id >= KPB_MAX_NO_OF_CLIENTS) {
1484 comp_cl_err(&comp_kpb, "kpb_register_client(): no free room for client = %u ",
1485 cli->id);
1486 ret = -EINVAL;
1487 } else if (kpb->clients[cli->id].state != KPB_CLIENT_UNREGISTERED) {
1488 comp_cl_err(&comp_kpb, "kpb_register_client(): client = %u already registered",
1489 cli->id);
1490 ret = -EINVAL;
1491 } else {
1492 /* Client accepted, let's store his data */
1493 kpb->clients[cli->id].id = cli->id;
1494 kpb->clients[cli->id].drain_req = cli->drain_req;
1495 kpb->clients[cli->id].sink = cli->sink;
1496 kpb->clients[cli->id].r_ptr = NULL;
1497 kpb->clients[cli->id].state = KPB_CLIENT_BUFFERING;
1498 kpb->kpb_no_of_clients++;
1499 ret = 0;
1500 }
1501
1502 return ret;
1503 }
1504
1505 /**
1506 * \brief Prepare history buffer for draining.
1507 *
1508 * \param[in] dev - kpb component data.
1509 * \param[in] cli - client's data.
1510 *
1511 */
kpb_init_draining(struct comp_dev * dev,struct kpb_client * cli)1512 static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli)
1513 {
1514 struct comp_data *kpb = comp_get_drvdata(dev);
1515 bool is_sink_ready = (kpb->host_sink->sink->state == COMP_STATE_ACTIVE);
1516 size_t sample_width = kpb->config.sampling_width;
1517 size_t drain_req = cli->drain_req * kpb->config.channels *
1518 (kpb->config.sampling_freq / 1000) *
1519 (KPB_SAMPLE_CONTAINER_SIZE(sample_width) / 8);
1520 struct history_buffer *buff = kpb->hd.c_hb;
1521 struct history_buffer *first_buff = buff;
1522 size_t buffered = 0;
1523 size_t local_buffered;
1524 size_t drain_interval;
1525 size_t host_period_size = kpb->host_period_size;
1526 size_t bytes_per_ms = KPB_SAMPLES_PER_MS *
1527 (KPB_SAMPLE_CONTAINER_SIZE(sample_width) / 8) *
1528 kpb->config.channels;
1529 size_t period_bytes_limit;
1530
1531 comp_info(dev, "kpb_init_draining(): requested draining of %d [ms] from history buffer",
1532 cli->drain_req);
1533
1534 if (kpb->state != KPB_STATE_RUN) {
1535 comp_err(dev, "kpb_init_draining(): wrong KPB state");
1536 } else if (cli->id > KPB_MAX_NO_OF_CLIENTS) {
1537 comp_err(dev, "kpb_init_draining(): wrong client id");
1538 /* TODO: check also if client is registered */
1539 } else if (!is_sink_ready) {
1540 comp_err(dev, "kpb_init_draining(): sink not ready for draining");
1541 } else if (kpb->hd.buffered < drain_req ||
1542 cli->drain_req > KPB_MAX_DRAINING_REQ) {
1543 comp_cl_err(&comp_kpb, "kpb_init_draining(): not enough data in history buffer");
1544 } else {
1545 /* Draining accepted, find proper buffer to start reading
1546 * At this point we are guaranteed that there is enough data
1547 * in the history buffer. All we have to do now is to calculate
1548 * read pointer from which we will start draining.
1549 */
1550 kpb_lock(kpb);
1551
1552 kpb_change_state(kpb, KPB_STATE_INIT_DRAINING);
1553
1554 /* Set history buffer size so new data won't overwrite those
1555 * staged for draining.
1556 */
1557 kpb->hd.free = kpb->hd.buffer_size - drain_req;
1558
1559 /* Find buffer to start draining from */
1560 do {
1561 /* Calculate how much data we have stored in
1562 * current buffer.
1563 */
1564 buff->r_ptr = buff->start_addr;
1565 if (buff->state == KPB_BUFFER_FREE) {
1566 local_buffered = (uintptr_t)buff->w_ptr -
1567 (uintptr_t)buff->start_addr;
1568 buffered += local_buffered;
1569 } else if (buff->state == KPB_BUFFER_FULL) {
1570 local_buffered = (uintptr_t)buff->end_addr -
1571 (uintptr_t)buff->start_addr;
1572 buffered += local_buffered;
1573 } else {
1574 comp_err(dev, "kpb_init_draining(): incorrect buffer label");
1575 }
1576 /* Check if this is already sufficient to start draining
1577 * if not, go to previous buffer and continue
1578 * calculations.
1579 */
1580 if (drain_req > buffered) {
1581 if (buff->prev == first_buff) {
1582 /* We went full circle and still don't
1583 * have sufficient data for draining.
1584 * That means we need to look up the
1585 * first buffer again. Our read pointer
1586 * is somewhere between write pointer
1587 * and buffer's end address.
1588 */
1589 buff = buff->prev;
1590 buffered += (uintptr_t)buff->end_addr -
1591 (uintptr_t)buff->w_ptr;
1592 buff->r_ptr = (char *)buff->w_ptr +
1593 (buffered - drain_req);
1594 break;
1595 }
1596 buff = buff->prev;
1597 } else if (drain_req == buffered) {
1598 buff->r_ptr = buff->start_addr;
1599 break;
1600 } else {
1601 buff->r_ptr = (char *)buff->start_addr +
1602 (buffered - drain_req);
1603 break;
1604 }
1605
1606 } while (buff != first_buff);
1607
1608 kpb_unlock(kpb);
1609
1610 /* Should we drain in synchronized mode (sync_draining_mode)?
1611 * Note! We have already verified host params during
1612 * kpb_prepare().
1613 */
1614 if (kpb->sync_draining_mode) {
1615 /* Calculate time in clock ticks each draining event
1616 * shall take place. This time will be used to
1617 * synchronize us with application interrupts.
1618 */
1619 drain_interval = k_ms_to_cyc_ceil64(host_period_size / bytes_per_ms) /
1620 KPB_DRAIN_NUM_OF_PPL_PERIODS_AT_ONCE;
1621 period_bytes_limit = host_period_size;
1622 comp_info(dev, "kpb_init_draining(): sync_draining_mode selected with interval %u [uS].",
1623 (unsigned int)k_cyc_to_us_near64(drain_interval));
1624 } else {
1625 /* Unlimited draining */
1626 drain_interval = 0;
1627 period_bytes_limit = 0;
1628 comp_info(dev, "kpb_init_draining: unlimited draining speed selected.");
1629 }
1630
1631 comp_info(dev, "kpb_init_draining(), schedule draining task");
1632
1633 /* Add one-time draining task into the scheduler. */
1634 kpb->draining_task_data.sink = kpb->host_sink;
1635 kpb->draining_task_data.hb = buff;
1636 kpb->draining_task_data.drain_req = drain_req;
1637 kpb->draining_task_data.sample_width = sample_width;
1638 kpb->draining_task_data.drain_interval = drain_interval;
1639 kpb->draining_task_data.pb_limit = period_bytes_limit;
1640 kpb->draining_task_data.dev = dev;
1641 kpb->draining_task_data.sync_mode_on = kpb->sync_draining_mode;
1642
1643 /* save current sink copy type */
1644 comp_get_attribute(kpb->host_sink->sink, COMP_ATTR_COPY_TYPE,
1645 &kpb->draining_task_data.copy_type);
1646
1647 if (kpb->force_copy_type != COMP_COPY_INVALID)
1648 comp_set_attribute(kpb->host_sink->sink, COMP_ATTR_COPY_TYPE,
1649 &kpb->force_copy_type);
1650
1651 /* Pause selector copy. */
1652 kpb->sel_sink->sink->state = COMP_STATE_PAUSED;
1653
1654 /* Schedule draining task */
1655 schedule_task(&kpb->draining_task, 0, 0);
1656 }
1657 }
1658
1659 /**
1660 * \brief Draining task.
1661 *
1662 * \param[in] arg - pointer keeping drainig data previously prepared
1663 * by kpb_init_draining().
1664 *
1665 * \return none.
1666 */
kpb_draining_task(void * arg)1667 static enum task_state kpb_draining_task(void *arg)
1668 {
1669 struct draining_data *draining_data = (struct draining_data *)arg;
1670 struct comp_buffer __sparse_cache *sink = buffer_acquire(draining_data->sink);
1671 struct history_buffer *buff = draining_data->hb;
1672 size_t drain_req = draining_data->drain_req;
1673 size_t sample_width = draining_data->sample_width;
1674 size_t size_to_read;
1675 size_t size_to_copy;
1676 bool move_buffer = false;
1677 uint32_t drained = 0;
1678 uint64_t draining_time_start;
1679 uint64_t draining_time_end;
1680 uint64_t draining_time_ms;
1681 uint64_t drain_interval = draining_data->drain_interval;
1682 uint64_t next_copy_time = 0;
1683 uint64_t current_time;
1684 size_t period_bytes = 0;
1685 size_t period_bytes_limit = draining_data->pb_limit;
1686 size_t period_copy_start = sof_cycle_get_64();
1687 size_t time_taken;
1688 size_t *rt_stream_update = &draining_data->buffered_while_draining;
1689 struct comp_data *kpb = comp_get_drvdata(draining_data->dev);
1690 bool sync_mode_on = draining_data->sync_mode_on;
1691 bool pm_is_active;
1692
1693 comp_cl_info(&comp_kpb, "kpb_draining_task(), start.");
1694
1695 pm_is_active = pm_runtime_is_active(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID);
1696
1697 if (!pm_is_active)
1698 pm_runtime_disable(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID);
1699
1700 /* Change KPB internal state to DRAINING */
1701 kpb_change_state(kpb, KPB_STATE_DRAINING);
1702
1703 draining_time_start = sof_cycle_get_64();
1704
1705 while (drain_req > 0) {
1706 /* Have we received reset request? */
1707 if (kpb->state == KPB_STATE_RESETTING) {
1708 kpb_change_state(kpb, KPB_STATE_RESET_FINISHING);
1709 kpb_reset(draining_data->dev);
1710 goto out;
1711 }
1712 /* Are we ready to drain further or host still need some time
1713 * to read the data already provided?
1714 */
1715 if (sync_mode_on &&
1716 next_copy_time > sof_cycle_get_64()) {
1717 period_bytes = 0;
1718 period_copy_start = sof_cycle_get_64();
1719 continue;
1720 } else if (next_copy_time == 0) {
1721 period_copy_start = sof_cycle_get_64();
1722 }
1723
1724 size_to_read = (uintptr_t)buff->end_addr - (uintptr_t)buff->r_ptr;
1725
1726 if (size_to_read > audio_stream_get_free_bytes(&sink->stream)) {
1727 if (audio_stream_get_free_bytes(&sink->stream) >= drain_req)
1728 size_to_copy = drain_req;
1729 else
1730 size_to_copy = audio_stream_get_free_bytes(&sink->stream);
1731 } else {
1732 if (size_to_read > drain_req) {
1733 size_to_copy = drain_req;
1734 } else {
1735 size_to_copy = size_to_read;
1736 move_buffer = true;
1737 }
1738 }
1739
1740 kpb_drain_samples(buff->r_ptr, &sink->stream, size_to_copy,
1741 sample_width);
1742
1743 buff->r_ptr = (char *)buff->r_ptr + (uint32_t)size_to_copy;
1744 drain_req -= size_to_copy;
1745 drained += size_to_copy;
1746 period_bytes += size_to_copy;
1747 kpb->hd.free += MIN(kpb->hd.buffer_size -
1748 kpb->hd.free, size_to_copy);
1749
1750 if (move_buffer) {
1751 buff->r_ptr = buff->start_addr;
1752 buff = buff->next;
1753 move_buffer = false;
1754 }
1755
1756 if (size_to_copy) {
1757 comp_update_buffer_produce(sink, size_to_copy);
1758 comp_copy(sink->sink);
1759 } else if (!audio_stream_get_free_bytes(&sink->stream)) {
1760 /* There is no free space in sink buffer.
1761 * Call .copy() on sink component so it can
1762 * process its data further.
1763 */
1764 comp_copy(sink->sink);
1765 }
1766
1767 if (sync_mode_on && period_bytes >= period_bytes_limit) {
1768 current_time = sof_cycle_get_64();
1769 time_taken = current_time - period_copy_start;
1770 next_copy_time = current_time + drain_interval -
1771 time_taken;
1772 }
1773
1774 if (drain_req == 0) {
1775 /* We have finished draining of requested data however
1776 * while we were draining real time stream could provided
1777 * new data which needs to be copy to host.
1778 */
1779 comp_cl_info(&comp_kpb, "kpb: update drain_req by %d",
1780 *rt_stream_update);
1781 kpb_lock(kpb);
1782 drain_req += *rt_stream_update;
1783 *rt_stream_update = 0;
1784 if (!drain_req && kpb->state == KPB_STATE_DRAINING) {
1785 /* Draining is done. Now switch KPB to copy real time
1786 * stream to client's sink. This state is called
1787 * "draining on demand"
1788 * Note! If KPB state changed during draining due to
1789 * i.e reset request we should not change that state.
1790 */
1791 kpb_change_state(kpb, KPB_STATE_HOST_COPY);
1792 }
1793 kpb_unlock(kpb);
1794 }
1795 }
1796
1797 out:
1798 draining_time_end = sof_cycle_get_64();
1799
1800 buffer_release(sink);
1801
1802 /* Reset host-sink copy mode back to its pre-draining value */
1803 sink = buffer_acquire(kpb->host_sink);
1804 comp_set_attribute(sink->sink, COMP_ATTR_COPY_TYPE,
1805 &kpb->draining_task_data.copy_type);
1806 buffer_release(sink);
1807
1808 draining_time_ms = k_cyc_to_ms_near64(draining_time_end - draining_time_start);
1809 if (draining_time_ms <= UINT_MAX)
1810 comp_cl_info(&comp_kpb, "KPB: kpb_draining_task(), done. %u drained in %u ms",
1811 drained, (unsigned int)draining_time_ms);
1812 else
1813 comp_cl_info(&comp_kpb, "KPB: kpb_draining_task(), done. %u drained in > %u ms",
1814 drained, UINT_MAX);
1815
1816 return SOF_TASK_STATE_COMPLETED;
1817 }
1818
1819 #ifdef KPB_HIFI3
/* Expand packed 24-bit samples from a linear source into 32-bit containers
 * in the sink's circular buffer (HiFi3 path).
 *
 * \param linear_source	linear buffer holding packed s24 samples.
 * \param ioffset	sample offset into the source.
 * \param sink		output stream, written at w_ptr with circular wrap.
 * \param ooffset	sample offset from the sink's write pointer.
 * \param n_samples	number of samples to convert.
 */
static void kpb_convert_24b_to_32b(const void *linear_source, int ioffset,
				   struct audio_stream __sparse_cache *sink, int ooffset,
				   unsigned int n_samples)
{
	int ssize = audio_stream_sample_bytes(sink);
	uint8_t *in = (uint8_t *)linear_source + ioffset * ssize;
	uint8_t *out = audio_stream_wrap(sink, (uint8_t *)sink->w_ptr + ooffset * ssize);
	ae_int32x2 *buf_end;
	ae_int32x2 *buf;

	/* Configure HW circular addressing over the whole sink buffer. */
	buf = (ae_int32x2 *)(sink->addr);
	buf_end = (ae_int32x2 *)(sink->end_addr);
	ae_int32x2 *out_ptr = (ae_int32x2 *)buf;

	AE_SETCBEGIN0(buf);
	AE_SETCEND0(buf_end);
	out_ptr = (ae_int32x2 *)out;

	ae_valign align_in = AE_LA64_PP(in);
	int i = 0;
	ae_int24x2 d24 = AE_ZERO24();

	/* Prologue: emit one sample to 8-byte-align the output for the
	 * paired 2-sample stores below. The 24-bit value lands in the top
	 * bits of the 32-bit container (<< 8).
	 */
	if (!IS_ALIGNED((uintptr_t)out_ptr, 8)) {
		AE_LA24_IP(d24, align_in, in);
		ae_int32x2 d320 = d24;
		int higher = AE_MOVAD32_H(d320);
		*(ae_int32 *)(out_ptr) = higher << 8;
		out_ptr = (ae_int32x2 *)(out + 4);
		++i;
	}
	/* process two samples in single iteration to increase performance */
	while (i < (int)n_samples - 1) {
		AE_LA24X2_IP(d24, align_in, in);
		ae_int32x2 d320 = d24;

		d320 = AE_SLAI32(d320, 8);
		AE_S32X2_XC(d320, out_ptr, 8);
		i += 2;
	}
	/* Epilogue: odd trailing sample, stored singly.
	 * NOTE(review): this store does not go through the circular XC path —
	 * presumably the caller guarantees it cannot land exactly on
	 * end_addr; confirm against kpb_drain_samples() usage.
	 */
	if (i != (int)n_samples) {
		AE_LA24X2_IP(d24, align_in, in);
		ae_int32x2 d320 = d24;
		int higher = AE_MOVAD32_H(d320);
		*(ae_int32 *)(out_ptr) = higher << 8;
	}
}
1866 #else
/* Expand packed 24-bit samples from a linear source into 32-bit containers
 * in the sink's circular buffer (generic C path).
 *
 * \param source	linear buffer holding packed s24 samples (3 bytes each).
 * \param ioffset	sample offset into the source.
 * \param sink		output stream, written at w_ptr with wrap handling.
 * \param ooffset	sample offset from the sink's write pointer.
 * \param samples	number of samples to convert.
 */
static void kpb_convert_24b_to_32b(const void *source, int ioffset,
				   struct audio_stream __sparse_cache *sink,
				   int ooffset, unsigned int samples)
{
	int ssize = audio_stream_sample_bytes(sink);
	uint8_t *src = (uint8_t *)source + ioffset * 3;
	int32_t *dst = audio_stream_wrap(sink, (uint8_t *)sink->w_ptr + ooffset * ssize);
	/* Fix: counters were signed int while 'samples' is unsigned, causing
	 * signed/unsigned comparison (-Wsign-compare); all are counts.
	 */
	unsigned int processed;
	unsigned int nmax, i, n;

	/* Copy in wrap-bounded chunks until all samples are converted. */
	for (processed = 0; processed < samples; processed += n) {
		dst = audio_stream_wrap(sink, dst);
		n = samples - processed;
		nmax = KPB_BYTES_TO_S32_SAMPLES(audio_stream_bytes_without_wrap(sink, dst));
		n = MIN(n, nmax);
		for (i = 0; i < n; i += 1) {
			/* Little-endian s24 -> LSB-aligned 32-bit container.
			 * NOTE(review): the HiFi3 variant MSB-aligns the
			 * sample (<< 8); confirm which alignment downstream
			 * consumers expect — the two paths differ.
			 */
			*dst = (src[2] << 16) | (src[1] << 8) | src[0];
			dst++;
			src += 3;
		}
	}
}
1889 #endif
1890 /**
1891 * \brief Drain data samples safe, according to configuration.
1892 *
1893 * \param[in] sink - pointer to sink buffer.
1894 * \param[in] source - pointer to source buffer.
1895 * \param[in] size - requested copy size in bytes.
1896 *
1897 * \return none.
1898 */
static void kpb_drain_samples(void *source, struct audio_stream __sparse_cache *sink,
			      size_t size, size_t sample_width)
{
	unsigned int sample_count;

	/* Dispatch on sample width; 24-bit data needs unpacking into 32-bit
	 * containers, the other widths copy straight through.
	 */
	switch (sample_width) {
#if CONFIG_FORMAT_S16LE
	case 16:
		sample_count = KPB_BYTES_TO_S16_SAMPLES(size);
		audio_stream_copy_from_linear(source, 0, sink, 0, sample_count);
		break;
#endif /* CONFIG_FORMAT_S16LE */
#if CONFIG_FORMAT_S24LE || CONFIG_FORMAT_S32LE
	case 24:
		/* Packed 3-byte samples: derive the count from width and
		 * channel layout, then expand to 32-bit containers.
		 */
		sample_count = size / ((sample_width >> 3) * sink->channels);
		kpb_convert_24b_to_32b(source, 0, sink, 0, sample_count);
		break;
	case 32:
		sample_count = KPB_BYTES_TO_S32_SAMPLES(size);
		audio_stream_copy_from_linear(source, 0, sink, 0, sample_count);
		break;
#endif /* CONFIG_FORMAT_S24LE || CONFIG_FORMAT_S32LE */
	default:
		comp_cl_err(&comp_kpb, "KPB: An attempt to copy not supported format!");
		return;
	}
}
1926
1927 #ifdef KPB_HIFI3
/* Pack 32-bit-container samples from the source's circular buffer into
 * 24-bit packed samples in a linear sink (HiFi3 path).
 *
 * \param source	input stream, read from r_ptr with circular wrap.
 * \param ioffset	sample offset from the source's read pointer.
 * \param linear_sink	linear output buffer for packed s24 samples.
 * \param ooffset	sample offset into the sink.
 * \param n_samples	number of samples to convert.
 */
static void kpb_convert_32b_to_24b(const struct audio_stream __sparse_cache *source, int ioffset,
				   void *linear_sink, int ooffset, unsigned int n_samples)
{
	int ssize = audio_stream_sample_bytes(source);
	uint8_t *in = audio_stream_wrap(source, (uint8_t *)source->r_ptr + ioffset * ssize);
	/* NOTE(review): the sink offset is scaled by the source sample size
	 * (ssize), not by 3 bytes as in the generic variant — confirm the
	 * intended ooffset units when ooffset != 0.
	 */
	uint8_t *out = (uint8_t *)linear_sink + ooffset * ssize;

	const ae_f24x2 *sin = (const ae_f24x2 *)in;
	ae_f24x2 *sout = (ae_f24x2 *)out;

	ae_f24x2 vs = AE_ZERO24();
	ae_valign align_out = AE_ZALIGN64();

	/* Prologue: single sample to 8-byte-align the input for the paired
	 * loads below.
	 */
	if (!IS_ALIGNED((uintptr_t)sin, 8)) {
		AE_L32F24_XC(vs, (const ae_f24 *)sin, 4);
		AE_SA24_IP(vs, align_out, sout);
		n_samples--;
	}

	/* Main loop: two samples per iteration via circular loads. */
	unsigned int size = n_samples >> 1;
	size_t i;

	for (i = 0; i < size; i++) {
		AE_L32X2F24_XC(vs, sin, 8);
		AE_SA24X2_IP(vs, align_out, sout);
	}
	AE_SA64POS_FP(align_out, sout);	/* flush the alignment register */

	/* Epilogue: odd trailing sample. */
	if (n_samples & 1) {
		AE_L32X2F24_XC(vs, sin, 4);
		ae_f24 tmp = AE_MOVAD32_H(AE_MOVINT24X2_FROMF24X2(vs));

		AE_SA24_IP(tmp, align_out, sout);
		AE_SA64POS_FP(align_out, sout);
	}
}
1964 #else
/**
 * \brief Convert 32-bit container samples to packed 24-bit data (generic path).
 *
 * \param[in] source - source audio stream with 32-bit containers.
 * \param[in] ioffset - sample offset into the source stream.
 * \param[out] sink - linear destination buffer (packed 24-bit, little-endian).
 * \param[in] ooffset - sample offset into the destination buffer.
 * \param[in] samples - number of samples to convert.
 */
static void kpb_convert_32b_to_24b(const struct audio_stream __sparse_cache *source, int ioffset,
				   void *sink, int ooffset, unsigned int samples)
{
	int ssize = audio_stream_sample_bytes(source);
	int32_t *in = audio_stream_wrap(source, (uint8_t *)source->r_ptr + ioffset * ssize);
	uint8_t *out = (uint8_t *)sink + ooffset * 3;
	unsigned int done = 0;

	while (done < samples) {
		int chunk, limit, j;

		/* Re-wrap the read pointer, then copy only up to the next
		 * source wrap point in this pass.
		 */
		in = audio_stream_wrap(source, in);
		chunk = samples - done;
		limit = KPB_BYTES_TO_S32_SAMPLES(audio_stream_bytes_without_wrap(source, in));
		if (chunk > limit)
			chunk = limit;

		for (j = 0; j < chunk; j++) {
			int32_t s = *in++;

			/* Store low 24 bits as 3 little-endian bytes. */
			out[0] = s & 0xFF;
			out[1] = (s >> 8) & 0xFF;
			out[2] = (s >> 16) & 0xFF;
			out += 3;
		}
		done += chunk;
	}
}
1988 #endif
1989 /**
1990 * \brief Buffers data samples safe, according to configuration.
1991 * \param[in,out] source Pointer to source buffer.
1992 * \param[in] offset Start offset of source buffer in bytes.
1993 * \param[in,out] sink Pointer to sink buffer.
1994 * \param[in] size Requested copy size in bytes.
1995 * \param[in] sample_width Sample size.
1996 */
static void kpb_buffer_samples(const struct audio_stream __sparse_cache *source,
			       int offset, void *sink, size_t size,
			       size_t sample_width)
{
	unsigned int count;
	int start;

	switch (sample_width) {
#if CONFIG_FORMAT_S16LE
	case 16:
		count = KPB_BYTES_TO_S16_SAMPLES(size);
		start = KPB_BYTES_TO_S16_SAMPLES(offset);
		audio_stream_copy_to_linear(source, start, sink, 0, count);
		break;
#endif
#if CONFIG_FORMAT_S24LE || CONFIG_FORMAT_S32LE
	case 24:
		/* 24-bit data is stored packed: sample_width / 8 bytes
		 * per channel sample.
		 */
		count = size / ((sample_width >> 3) * source->channels);
		start = offset / ((sample_width >> 3) * source->channels);
		kpb_convert_32b_to_24b(source, start, sink, 0, count);
		break;
	case 32:
		count = KPB_BYTES_TO_S32_SAMPLES(size);
		start = KPB_BYTES_TO_S32_SAMPLES(offset);
		audio_stream_copy_to_linear(source, start, sink, 0, count);
		break;
#endif
	default:
		comp_cl_err(&comp_kpb, "KPB: An attempt to copy not supported format!");
		return;
	}
}
2032
2033 /**
2034 * \brief Initialize history buffer by zeroing its memory.
2035 * \param[in] buff - pointer to current history buffer.
2036 *
2037 * \return: none.
2038 */
kpb_clear_history_buffer(struct history_buffer * buff)2039 static void kpb_clear_history_buffer(struct history_buffer *buff)
2040 {
2041 struct history_buffer *first_buff = buff;
2042 void *start_addr;
2043 size_t size;
2044
2045 comp_cl_info(&comp_kpb, "kpb_clear_history_buffer()");
2046
2047 do {
2048 start_addr = buff->start_addr;
2049 size = (uintptr_t)buff->end_addr - (uintptr_t)start_addr;
2050
2051 bzero(start_addr, size);
2052
2053 buff = buff->next;
2054 } while (buff != first_buff);
2055 }
2056
/**
 * \brief Check whether a sampling width is supported by the build config.
 * \param[in] sampling_width - sample width in bits.
 * \return true when the width is enabled by CONFIG_FORMAT_*, false otherwise.
 */
static inline bool kpb_is_sample_width_supported(uint32_t sampling_width)
{
	switch (sampling_width) {
#if CONFIG_FORMAT_S16LE
	case 16:
#endif /* CONFIG_FORMAT_S16LE */
#if CONFIG_FORMAT_S24LE
	case 24:
#endif /* CONFIG_FORMAT_S24LE */
#if CONFIG_FORMAT_S32LE
	case 32:
#endif /* CONFIG_FORMAT_S32LE */
		return true;
	default:
		return false;
	}
}
2082
2083 #ifdef KPB_HIFI3
/**
 * \brief Copy 24-bit samples held in 32-bit containers (HiFi3 path).
 *
 * Copies n_samples 32-bit words from the source stream to the sink stream
 * using paired loads/stores; src and sink use the same container size.
 *
 * \param[in] source - source audio stream.
 * \param[in] ioffset - sample offset into the source stream.
 * \param[out] sink - destination audio stream.
 * \param[in] ooffset - sample offset into the destination stream.
 * \param[in] n_samples - number of samples to copy.
 */
static void kpb_copy_24b_in_32b(const struct audio_stream __sparse_cache *source, uint32_t ioffset,
				struct audio_stream __sparse_cache *sink, uint32_t ooffset,
				uint32_t n_samples)
{
	int ssize = audio_stream_sample_bytes(source); /* src fmt == sink fmt */
	uint8_t *in = audio_stream_wrap(source, (uint8_t *)source->r_ptr + ioffset * ssize);
	uint8_t *out = audio_stream_wrap(sink, (uint8_t *)sink->w_ptr + ooffset * ssize);

	const ae_int32x2 *sin = (const ae_int32x2 *)in;
	ae_int32x2 *sout = (ae_int32x2 *)out;
	ae_int32x2 vs = AE_ZERO32();

	/* Copy one sample first if the input is not 8-byte aligned so the
	 * main loop can use paired 64-bit loads.
	 * NOTE(review): only the input alignment is checked; the output
	 * stores below use alignment registers so unaligned out is handled.
	 */
	if (!IS_ALIGNED((uintptr_t)sin, 8)) {
		AE_L32_IP(vs, (const ae_int32 *)sin, 4);
		AE_S32_L_IP(vs, (ae_int32 *)sout, 4);
		n_samples--;
	}
	ae_valign align_out = AE_ZALIGN64();
	size_t size = n_samples >> 1;
	size_t i;

	/* Main loop: two 32-bit samples per iteration. */
	for (i = 0; i < size; i++) {
		AE_L32X2_IP(vs, sin, 8);
		AE_SA32X2_IP(vs, align_out, sout);
	}
	AE_SA64POS_FP(align_out, sout);
	/* Odd trailing sample, if any. */
	if (n_samples & 1) {
		vs = AE_L32_I((const ae_int32 *)sin, 0);
		AE_S32_L_I(vs, (ae_int32 *)sout, 0);
	}
}
2115 #else
/**
 * \brief Copy 24-bit samples held in 32-bit containers (generic path).
 *
 * Copies samples from source to sink, shifting each value left by 8 bits,
 * handling wrap-around of both circular streams.
 *
 * \param[in] source - source audio stream.
 * \param[in] ioffset - sample offset into the source stream.
 * \param[out] sink - destination audio stream.
 * \param[in] ooffset - sample offset into the destination stream.
 * \param[in] samples - number of samples to copy.
 */
static void kpb_copy_24b_in_32b(const struct audio_stream __sparse_cache *source,
				uint32_t ioffset, struct audio_stream __sparse_cache *sink,
				uint32_t ooffset, uint32_t samples)
{
	int32_t *src = source->r_ptr;
	int32_t *dst = sink->w_ptr;
	int processed;
	int nmax, i, n;

	src += ioffset;
	dst += ooffset;
	for (processed = 0; processed < samples; processed += n) {
		src = audio_stream_wrap(source, src);
		dst = audio_stream_wrap(sink, dst);
		/* Copy at most up to the nearest wrap point of either stream. */
		n = samples - processed;
		nmax = KPB_BYTES_TO_S32_SAMPLES(audio_stream_bytes_without_wrap(source, src));
		n = MIN(n, nmax);
		nmax = KPB_BYTES_TO_S32_SAMPLES(audio_stream_bytes_without_wrap(sink, dst));
		n = MIN(n, nmax);
		for (i = 0; i < n; i++) {
			/* Shift through unsigned to keep the bit pattern and
			 * avoid undefined behavior when shifting a negative
			 * or bit-23-set value (CERT INT34-C).
			 */
			*dst = (int32_t)((uint32_t)*src << 8);
			src++;
			dst++;
		}
	}
}
2142 #endif
/**
 * \brief Copy data samples safe, according to configuration.
 *
 * \param[in] sink - pointer to sink buffer.
 * \param[in] source - pointer to source buffer.
 * \param[in] size - requested copy size in bytes.
 * \param[in] sample_width - sample size in bits.
 * \param[in] channels - number of audio channels.
 *
 * \return none.
 */
static void kpb_copy_samples(struct comp_buffer __sparse_cache *sink,
			     struct comp_buffer __sparse_cache *source, size_t size,
			     size_t sample_width, uint32_t channels)
{
	struct audio_stream __sparse_cache *in = &source->stream;
	struct audio_stream __sparse_cache *out = &sink->stream;
	unsigned int count;

	/* Ensure the source data is coherent before reading it. */
	buffer_stream_invalidate(source, size);

	switch (sample_width) {
#if CONFIG_FORMAT_S16LE
	case 16:
		audio_stream_copy(in, 0, out, 0, KPB_BYTES_TO_S16_SAMPLES(size));
		break;
#endif
#if CONFIG_FORMAT_S24LE || CONFIG_FORMAT_S32LE
	case 24:
		count = size / ((sample_width >> 3) * channels);
		kpb_copy_24b_in_32b(in, 0, out, 0, count);
		break;
	case 32:
		audio_stream_copy(in, 0, out, 0, KPB_BYTES_TO_S32_SAMPLES(size));
		break;
#endif
	default:
		comp_cl_err(&comp_kpb, "KPB: An attempt to copy not supported format!");
		return;
	}

	/* Write the copied region back for other observers of the sink. */
	buffer_stream_writeback(sink, size);
}
2183
2184 /**
2185 * \brief Reset history buffer.
2186 * \param[in] buff - pointer to current history buffer.
2187 *
2188 * \return none.
2189 */
kpb_reset_history_buffer(struct history_buffer * buff)2190 static void kpb_reset_history_buffer(struct history_buffer *buff)
2191 {
2192 struct history_buffer *first_buff = buff;
2193
2194 comp_cl_info(&comp_kpb, "kpb_reset_history_buffer()");
2195
2196 if (!buff)
2197 return;
2198
2199 kpb_clear_history_buffer(buff);
2200
2201 do {
2202 buff->w_ptr = buff->start_addr;
2203 buff->r_ptr = buff->start_addr;
2204 buff->state = KPB_BUFFER_FREE;
2205
2206 buff = buff->next;
2207
2208 } while (buff != first_buff);
2209 }
2210
/**
 * \brief Validate host-side buffer/period parameters for stable draining.
 * \param[in] dev - KPB component device.
 * \param[in] host_period_size - host period size in bytes.
 * \param[in] host_buffer_size - host buffer size in bytes.
 * \param[in] hb_size_req - requested history buffer size in bytes.
 * \return true when draining can proceed, false when params are rejected.
 */
static inline bool validate_host_params(struct comp_dev *dev,
					size_t host_period_size,
					size_t host_buffer_size,
					size_t hb_size_req)
{
	/* The aim of this function is to perform basic check of host params
	 * and reject them if they won't allow for stable draining.
	 * Note however that this is highly recommended for host buffer to
	 * be at least twice the history buffer size. This will quarantee
	 * "safe" draining.
	 * By safe we mean no XRUNs(host was unable to read data on time),
	 * or loss of data due to host delayed read. The later condition
	 * is very likely after wake up from power state like d0ix.
	 */
	struct comp_data *kpb = comp_get_drvdata(dev);
	size_t sample_width = kpb->config.sampling_width;
	/* Bytes of one millisecond of audio at the configured width/channels. */
	size_t bytes_per_ms = KPB_SAMPLES_PER_MS *
			      (KPB_SAMPLE_CONTAINER_SIZE(sample_width) / 8) *
			      kpb->config.channels;
	/* dev->pipeline->period divided by 1000 - period presumably in
	 * microseconds, yielding bytes per pipeline period.
	 */
	size_t pipeline_period_size = (dev->pipeline->period / 1000)
					* bytes_per_ms;

	if (!host_period_size || !host_buffer_size) {
		/* Wrong host params */
		comp_err(dev, "kpb: host_period_size (%d) cannot be 0 and host_buffer_size (%d) cannot be 0",
			 host_period_size, host_buffer_size);
		return false;
	} else if (HOST_BUFFER_MIN_SIZE(hb_size_req, kpb->config.channels) >
		   host_buffer_size) {
		/* Host buffer size is too small - history data
		 * may get overwritten.
		 * NOTE(review): warn-only by design - still returns true,
		 * and the sync-draining check below is skipped in this case.
		 */
		comp_warn(dev, "kpb: host_buffer_size (%d) must be at least %d",
			  host_buffer_size,
			  HOST_BUFFER_MIN_SIZE(hb_size_req, kpb->config.channels));
	} else if (kpb->sync_draining_mode) {
		/* Sync draining allowed. Check if we can perform draining
		 * with current settings.
		 * In this mode we copy host period size to host
		 * (to avoid overwrite of buffered data by real time stream
		 * this period shall be bigger than pipeline period) and
		 * give host some time to read it. Therefore, in worst
		 * case scenario, we copy one period of real time data + some
		 * of buffered data.
		 */
		if ((host_period_size / KPB_DRAIN_NUM_OF_PPL_PERIODS_AT_ONCE) <
		    pipeline_period_size) {
			comp_err(dev, "kpb: host_period_size (%d) must be at least %d * %d",
				 host_period_size,
				 KPB_DRAIN_NUM_OF_PPL_PERIODS_AT_ONCE,
				 pipeline_period_size);
			return false;
		}
	}

	return true;
}
2268
2269 /**
2270 * \brief Change KPB state and log this change internally.
2271 * \param[in] kpb - KPB component data pointer.
2272 * \param[in] state - current KPB state.
2273 *
2274 * \return none.
2275 */
kpb_change_state(struct comp_data * kpb,enum kpb_state state)2276 static inline void kpb_change_state(struct comp_data *kpb,
2277 enum kpb_state state)
2278 {
2279 comp_cl_dbg(&comp_kpb, "kpb_change_state(): from %d to %d",
2280 kpb->state, state);
2281 kpb->state = state;
2282 kpb->state_log = (kpb->state_log << 4) | state;
2283 }
2284
kpb_set_micselect(struct comp_dev * dev,const void * data,int max_data_size)2285 static int kpb_set_micselect(struct comp_dev *dev, const void *data,
2286 int max_data_size)
2287 {
2288 const struct kpb_micselector_config *mic_sel = data;
2289 struct comp_data *kpb = comp_get_drvdata(dev);
2290 const size_t mic_cnt = kpb->config.channels - KPB_REFERENCE_SUPPORT_CHANNELS;
2291 const uint8_t valid_mask = KPB_COUNT_TO_BITMASK(mic_cnt);
2292 size_t i;
2293
2294 if ((valid_mask & mic_sel->mask) == 0) {
2295 comp_err(dev, "error: invalid micselector bit mask");
2296 return -EINVAL;
2297 }
2298 /* selected mics counter */
2299 size_t num_of_sel_mic = 0;
2300
2301 for (i = 0; i < mic_cnt; i++) {
2302 if (KPB_IS_BIT_SET(mic_sel->mask, i)) {
2303 kpb->offsets[num_of_sel_mic] = i;
2304 num_of_sel_mic++;
2305 }
2306 }
2307 kpb->num_of_sel_mic = num_of_sel_mic;
2308 kpb->num_of_in_channels = kpb->config.channels;
2309 kpb->mic_sel.mask = mic_sel->mask;
2310 return 0;
2311 }
2312
kpb_set_large_config(struct comp_dev * dev,uint32_t param_id,bool first_block,bool last_block,uint32_t data_offset,const char * data)2313 static int kpb_set_large_config(struct comp_dev *dev, uint32_t param_id,
2314 bool first_block,
2315 bool last_block,
2316 uint32_t data_offset,
2317 const char *data)
2318 {
2319 comp_info(dev, "kpb_set_large_config()");
2320
2321 switch (param_id) {
2322 case KP_BUF_CLIENT_MIC_SELECT:
2323 return kpb_set_micselect(dev, data, data_offset);
2324 default:
2325 return -EINVAL;
2326 }
2327 }
2328
2329 static const struct comp_driver comp_kpb = {
2330 .type = SOF_COMP_KPB,
2331 .uid = SOF_RT_UUID(kpb_uuid),
2332 .tctx = &kpb_tr,
2333 .ops = {
2334 .create = kpb_new,
2335 .free = kpb_free,
2336 .cmd = kpb_cmd,
2337 .trigger = kpb_trigger,
2338 .copy = kpb_copy,
2339 .prepare = kpb_prepare,
2340 .reset = kpb_reset,
2341 .params = kpb_params,
2342 .set_large_config = kpb_set_large_config,
2343 #ifdef CONFIG_IPC_MAJOR_4
2344 .get_attribute = kpb_get_attribute,
2345 .bind = kpb_bind,
2346 .unbind = kpb_unbind,
2347 #endif /* CONFIG_IPC_MAJOR_4 */
2348 },
2349 };
2350
/* Shared driver registration record for the KPB component. */
static SHARED_DATA struct comp_driver_info comp_kpb_info = {
	.drv = &comp_kpb,
};
2354
sys_comp_kpb_init(void)2355 UT_STATIC void sys_comp_kpb_init(void)
2356 {
2357 comp_register(platform_shared_get(&comp_kpb_info,
2358 sizeof(comp_kpb_info)));
2359 }
2360
/* Hook the init function into the SOF module/startup machinery. */
DECLARE_MODULE(sys_comp_kpb_init);
SOF_MODULE_INIT(kpb, sys_comp_kpb_init);
