// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2019 Intel Corporation. All rights reserved.
//
// Author: Marcin Rajwa <marcin.rajwa@linux.intel.com>

/*
 * A key phrase buffer component.
 */

/**
 * \file audio/kpb.c
 * \brief Key phrase buffer component implementation
 * \author Marcin Rajwa <marcin.rajwa@linux.intel.com>
 */

#include <sof/audio/buffer.h>
#include <sof/audio/component_ext.h>
#include <sof/audio/pipeline.h>
#include <sof/audio/kpb.h>
#include <sof/audio/ipc-config.h>
#include <sof/common.h>
#include <sof/debug/panic.h>
#include <sof/ipc/msg.h>
#include <sof/drivers/timer.h>
#include <sof/lib/alloc.h>
#include <sof/lib/clk.h>
#include <sof/lib/memory.h>
#include <sof/lib/notifier.h>
#include <sof/lib/pm_runtime.h>
#include <sof/lib/uuid.h>
#include <sof/list.h>
#include <sof/math/numbers.h>
#include <sof/platform.h>
#include <sof/schedule/edf_schedule.h>
#include <sof/schedule/schedule.h>
#include <sof/schedule/task.h>
#include <sof/string.h>
#include <sof/ut.h>
#include <ipc/topology.h>
#include <user/kpb.h>
#include <user/trace.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static const struct comp_driver comp_kpb;

/* d8218443-5ff3-4a4c-b388-6cfe07b9562e */
DECLARE_SOF_RT_UUID("kpb", kpb_uuid, 0xd8218443, 0x5ff3, 0x4a4c,
		    0xb3, 0x88, 0x6c, 0xfe, 0x07, 0xb9, 0x56, 0x2e);

DECLARE_TR_CTX(kpb_tr, SOF_UUID(kpb_uuid), LOG_LEVEL_INFO);

/* e50057a5-8b27-4db4-bd79-9a639cee5f50 */
DECLARE_SOF_UUID("kpb-task", kpb_task_uuid, 0xe50057a5, 0x8b27, 0x4db4,
		 0xbd, 0x79, 0x9a, 0x63, 0x9c, 0xee, 0x5f, 0x50);

/* KPB private data, runtime data */
struct comp_data {
	enum kpb_state state; /**< current state of KPB component */
	uint32_t state_log; /**< keeps record of KPB recent states */
	spinlock_t lock; /**< locking mechanism for read pointer calculations */
	struct sof_kpb_config config; /**< component configuration data */
	struct history_data hd; /**< data related to history buffer */
	struct task draining_task;
	struct draining_data draining_task_data;
	struct kpb_client clients[KPB_MAX_NO_OF_CLIENTS];
	struct comp_buffer *sel_sink; /**< real time sink (channel selector) */
	struct comp_buffer *host_sink; /**< draining sink (client) */
	uint32_t kpb_no_of_clients; /**< number of registered clients */
	uint32_t source_period_bytes; /**< source number of period bytes */
	uint32_t sink_period_bytes; /**< sink number of period bytes */
	size_t host_buffer_size; /**< size of host buffer */
	size_t host_period_size; /**< size of history period */
	bool sync_draining_mode; /**< should we synchronize draining with
				  * host?
				  */
	enum comp_copy_type force_copy_type; /**< should we force copy_type on kpb sink? */
};

/*! KPB private functions */
static void kpb_event_handler(void *arg, enum notify_id type, void *event_data);
static int kpb_register_client(struct comp_data *kpb, struct kpb_client *cli);
static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli);
static enum task_state kpb_draining_task(void *arg);
static int kpb_buffer_data(struct comp_dev *dev,
			   const struct comp_buffer *source, size_t size);
static size_t kpb_allocate_history_buffer(struct comp_data *kpb,
					  size_t hb_size_req);
static void kpb_clear_history_buffer(struct history_buffer *buff);
static void kpb_free_history_buffer(struct history_buffer *buff);
static inline bool kpb_is_sample_width_supported(uint32_t sampling_width);
static void kpb_copy_samples(struct comp_buffer *sink,
			     struct comp_buffer *source, size_t size,
			     size_t sample_width);
static void kpb_drain_samples(void *source, struct audio_stream *sink,
			      size_t size, size_t sample_width);
static void kpb_buffer_samples(const struct audio_stream *source,
			       uint32_t start, void *sink, size_t size,
			       size_t sample_width);
static void kpb_reset_history_buffer(struct history_buffer *buff);
static inline bool validate_host_params(struct comp_dev *dev,
					size_t host_period_size,
					size_t host_buffer_size,
					size_t hb_size_req);
static inline void kpb_change_state(struct comp_data *kpb,
				    enum kpb_state state);

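/* The draining task is scheduled through EDF with an "almost idle" deadline,
 * i.e. it is expected to run only when no hard-deadline audio task competes
 * for the core.
 */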
static uint64_t kpb_task_deadline(void *data)
{
	return SOF_TASK_DEADLINE_ALMOST_IDLE;
}

/**
 * \brief Create a key phrase buffer component.
 * \param[in] drv - component driver pointer.
 * \param[in] config - generic IPC component configuration pointer.
 * \param[in] spec - pointer to component specific configuration data.
 *
 * \return: a pointer to newly created KPB component.
 */
static struct comp_dev *kpb_new(const struct comp_driver *drv,
				struct comp_ipc_config *config,
				void *spec)
{
	struct ipc_config_process *ipc_process = spec;
	struct task_ops ops = {
		.run = kpb_draining_task,
		.get_deadline = kpb_task_deadline,
	};
	struct comp_dev *dev;
	struct comp_data *kpb;
	int ret;

	comp_cl_info(&comp_kpb, "kpb_new()");

	/* make sure data size is not bigger than config space */
	if (ipc_process->size > sizeof(struct sof_kpb_config)) {
		comp_cl_err(&comp_kpb, "kpb_new(): data size %u too big",
			    ipc_process->size);
		return NULL;
	}

	dev = comp_alloc(drv, sizeof(*dev));
	if (!dev)
		return NULL;
	dev->ipc_config = *config;

	kpb = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM, sizeof(*kpb));
	if (!kpb) {
		rfree(dev);
		return NULL;
	}

	comp_set_drvdata(dev, kpb);

	ret = memcpy_s(&kpb->config, sizeof(kpb->config), ipc_process->data,
		       ipc_process->size);
	assert(!ret);

	if (!kpb_is_sample_width_supported(kpb->config.sampling_width)) {
		comp_err(dev, "kpb_new(): requested sampling width not supported");
		rfree(dev);
		return NULL;
	}

	if (kpb->config.channels > KPB_MAX_SUPPORTED_CHANNELS) {
		comp_err(dev, "kpb_new(): no of channels exceeded the limit");
		rfree(dev);
		return NULL;
	}

	if (kpb->config.sampling_freq != KPB_SAMPLNG_FREQUENCY) {
		comp_err(dev, "kpb_new(): requested sampling frequency not supported");
		rfree(dev);
		return NULL;
	}

	/* Initialize draining task */
	schedule_task_init_edf(&kpb->draining_task, /* task structure */
			       SOF_UUID(kpb_task_uuid), /* task uuid */
			       &ops, /* task ops */
			       &kpb->draining_task_data, /* task private data */
			       0, /* core on which we should run */
			       0); /* no flags */

	/* Init basic component data */
	kpb->hd.c_hb = NULL;
	kpb->kpb_no_of_clients = 0;
	kpb->state_log = 0;

#ifdef CONFIG_KPB_FORCE_COPY_TYPE_NORMAL
	kpb->force_copy_type = COMP_COPY_NORMAL;
#else
	kpb->force_copy_type = COMP_COPY_INVALID; /* do not change kpb sink copy type */
#endif

	/* KPB has been created successfully */
	dev->state = COMP_STATE_READY;
	kpb_change_state(kpb, KPB_STATE_CREATED);

	return dev;
}

/**
 * \brief Allocate history buffer.
 * \param[in] kpb - KPB component data pointer.
 * \param[in] hb_size_req - requested size of the history buffer.
 *
 * \return: the total size of allocated memory in bytes.
 */
static size_t kpb_allocate_history_buffer(struct comp_data *kpb,
					  size_t hb_size_req)
{
	struct history_buffer *hb;
	struct history_buffer *new_hb = NULL;
	/*! Total allocation size */
	size_t hb_size = hb_size_req;
	/*! Current allocation size */
	size_t ca_size = hb_size;
	/*! Memory caps priorities for history buffer */
	int hb_mcp[KPB_NO_OF_MEM_POOLS] = {SOF_MEM_CAPS_LP, SOF_MEM_CAPS_HP,
					   SOF_MEM_CAPS_RAM };
	void *new_mem_block = NULL;
	size_t temp_ca_size;
	int i = 0;
	size_t allocated_size = 0;

	comp_cl_info(&comp_kpb, "kpb_allocate_history_buffer()");

	/* Initialize history buffer */
	kpb->hd.c_hb = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM,
			       sizeof(struct history_buffer));
	if (!kpb->hd.c_hb)
		return 0;
	kpb->hd.c_hb->next = kpb->hd.c_hb;
	kpb->hd.c_hb->prev = kpb->hd.c_hb;
	hb = kpb->hd.c_hb;

	/* Allocate history buffer(s). The history buffer needs to hold
	 * KPB_MAX_BUFFER_SIZE bytes; since there may be no single memory
	 * block that big, we allocate several smaller blocks which, linked
	 * together, form the history buffer.
	 */
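	/* Illustrative sketch of the strategy below (sizes hypothetical):
	 * for a 96 KB request the loop first asks the LP pool for 96 KB;
	 * each failed rballoc() retry shrinks the ask by KPB_ALLOCATION_STEP
	 * until something fits or the size hits zero, after which the
	 * remaining bytes are requested from the next pool - HP, then
	 * generic RAM.
	 */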
	while (hb_size > 0 && i < ARRAY_SIZE(hb_mcp)) {
		/* Try to allocate ca_size (current allocation size). At first
		 * attempt it will be equal to hb_size (history buffer size).
		 */
		new_mem_block = rballoc(0, hb_mcp[i], ca_size);

		if (new_mem_block) {
			/* We managed to allocate a block of ca_size.
			 * Now we initialize it.
			 */
			comp_cl_info(&comp_kpb, "kpb new memory block: %d",
				     ca_size);
			allocated_size += ca_size;
			hb->start_addr = new_mem_block;
			hb->end_addr = (char *)new_mem_block +
				ca_size;
			hb->w_ptr = new_mem_block;
			hb->r_ptr = new_mem_block;
			hb->state = KPB_BUFFER_FREE;
			hb_size -= ca_size;
			hb->next = kpb->hd.c_hb;
			/* Do we need another buffer? */
			if (hb_size > 0) {
				/* Yes, we still need at least one more buffer.
				 * Let's first create a new container for it.
				 */
				new_hb = rzalloc(SOF_MEM_ZONE_RUNTIME, 0,
						 SOF_MEM_CAPS_RAM,
						 sizeof(struct history_buffer));
				if (!new_hb)
					return 0;
				hb->next = new_hb;
				new_hb->next = kpb->hd.c_hb;
				new_hb->state = KPB_BUFFER_OFF;
				new_hb->prev = hb;
				hb = new_hb;
				kpb->hd.c_hb->prev = new_hb;
				ca_size = hb_size;
				i++;
			}
		} else {
			/* We have failed to allocate ca_size from this memory
			 * pool, so let's try again with a smaller size.
			 * NOTE! If we decrement by some small value,
			 * the allocation will take significant time.
			 * However, bigger values like
			 * HEAP_HP_BUFFER_BLOCK_SIZE will result in lower
			 * accuracy of allocation.
			 */
			temp_ca_size = ca_size - KPB_ALLOCATION_STEP;
			ca_size = (ca_size < temp_ca_size) ? 0 : temp_ca_size;
			if (ca_size == 0) {
				ca_size = hb_size;
				i++;
			}
			continue;
		}
	}

	comp_cl_info(&comp_kpb, "kpb_allocate_history_buffer(): allocated %d bytes",
		     allocated_size);

	return allocated_size;
}

/**
 * \brief Reclaim memory of a history buffer.
 * \param[in] buff - pointer to current history buffer.
 *
 * \return none.
 */
static void kpb_free_history_buffer(struct history_buffer *buff)
{
	struct history_buffer *_buff;
	struct history_buffer *first_buff = buff;

	comp_cl_info(&comp_kpb, "kpb_free_history_buffer()");

	if (!buff)
		return;

	/* Free history buffer/s */
	do {
		/* First reclaim HB internal memory, then HB itself */
		if (buff->start_addr)
			rfree(buff->start_addr);

		_buff = buff->next;
		rfree(buff);
		buff = _buff;
	} while (buff && buff != first_buff);
}

/**
 * \brief Reclaim memory of a key phrase buffer.
 * \param[in] dev - component device pointer.
 *
 * \return none.
 */
static void kpb_free(struct comp_dev *dev)
{
	struct comp_data *kpb = comp_get_drvdata(dev);

	comp_info(dev, "kpb_free()");

	/* Unregister KPB from notifications */
	notifier_unregister(dev, NULL, NOTIFIER_ID_KPB_CLIENT_EVT);

	/* Reclaim memory occupied by history buffer */
	kpb_free_history_buffer(kpb->hd.c_hb);
	kpb->hd.c_hb = NULL;
	kpb->hd.buffer_size = 0;

	/* remove scheduling */
	schedule_task_free(&kpb->draining_task);

	/* change state */
	kpb_change_state(kpb, KPB_STATE_DISABLED);

	/* Free KPB */
	rfree(kpb);
	rfree(dev);
}

/**
 * \brief Trigger a change of KPB state.
 * \param[in] dev - component device pointer.
 * \param[in] cmd - command type.
 * \return Error code.
 */
static int kpb_trigger(struct comp_dev *dev, int cmd)
{
	comp_info(dev, "kpb_trigger()");

	return comp_set_state(dev, cmd);
}

static int kpb_verify_params(struct comp_dev *dev,
			     struct sof_ipc_stream_params *params)
{
	int ret;

	comp_dbg(dev, "kpb_verify_params()");

	ret = comp_verify_params(dev, 0, params);
	if (ret < 0) {
		comp_err(dev, "kpb_verify_params(): comp_verify_params() failed");
		return ret;
	}

	return 0;
}

/**
 * \brief KPB params.
 * \param[in] dev - component device pointer.
 * \param[in] params - pcm params.
 * \return Error code.
 */
static int kpb_params(struct comp_dev *dev,
		      struct sof_ipc_stream_params *params)
{
	struct comp_data *kpb = comp_get_drvdata(dev);
	int err;

	if (dev->state == COMP_STATE_PREPARE) {
		comp_err(dev, "kpb_params(): KPB has already been configured.");
		return PPL_STATUS_PATH_STOP;
	}

	err = kpb_verify_params(dev, params);
	if (err < 0) {
		comp_err(dev, "kpb_params(): pcm params verification failed");
		return -EINVAL;
	}

	kpb->host_buffer_size = params->buffer.size;
	kpb->host_period_size = params->host_period_bytes;
	kpb->config.sampling_width = params->sample_container_bytes * 8;

	return 0;
}

/**
 * \brief Prepare key phrase buffer.
 * \param[in] dev - kpb component device pointer.
 *
 * \return integer representing either:
 *	0 -> success
 *	-EINVAL -> failure.
 */
static int kpb_prepare(struct comp_dev *dev)
{
	struct comp_data *kpb = comp_get_drvdata(dev);
	int ret = 0;
	int i;
	struct list_item *blist;
	struct comp_buffer *sink;
	size_t hb_size_req = KPB_MAX_BUFFER_SIZE(kpb->config.sampling_width);

	comp_info(dev, "kpb_prepare()");

	if (kpb->state == KPB_STATE_RESETTING ||
	    kpb->state == KPB_STATE_RESET_FINISHING) {
		comp_cl_err(&comp_kpb, "kpb_prepare(): cannot prepare KPB due to ongoing reset, state log %x",
			    kpb->state_log);
		return -EBUSY;
	}

	ret = comp_set_state(dev, COMP_TRIGGER_PREPARE);
	if (ret < 0)
		return ret;

	if (ret == COMP_STATUS_STATE_ALREADY_SET)
		return PPL_STATUS_PATH_STOP;

	if (!validate_host_params(dev, kpb->host_period_size,
				  kpb->host_buffer_size, hb_size_req)) {
		return -EINVAL;
	}

	kpb_change_state(kpb, KPB_STATE_PREPARING);

	/* Init private data */
	kpb->kpb_no_of_clients = 0;
	kpb->hd.buffered = 0;
	kpb->sel_sink = NULL;
	kpb->host_sink = NULL;

	if (kpb->hd.c_hb && kpb->hd.buffer_size < hb_size_req) {
		/* Host params have changed, we need to allocate a new buffer */
		kpb_free_history_buffer(kpb->hd.c_hb);
		kpb->hd.c_hb = NULL;
	}

	if (!kpb->hd.c_hb) {
		/* Allocate history buffer */
		kpb->hd.buffer_size = kpb_allocate_history_buffer(kpb,
								  hb_size_req);

		/* Have we allocated what we requested? */
		if (kpb->hd.buffer_size < hb_size_req) {
			comp_cl_err(&comp_kpb, "kpb_prepare(): failed to allocate space for KPB buffer");
			kpb_free_history_buffer(kpb->hd.c_hb);
			kpb->hd.c_hb = NULL;
			kpb->hd.buffer_size = 0;
			return -EINVAL;
		}
	}
	/* Init history buffer */
	kpb_reset_history_buffer(kpb->hd.c_hb);
	kpb->hd.free = kpb->hd.buffer_size;
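	/* hd.free tracks how much of the history buffer new data may still
	 * overwrite; kpb_init_draining() later shrinks it so that data
	 * staged for draining is protected from the real time stream.
	 */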

	/* Initialize clients data */
	for (i = 0; i < KPB_MAX_NO_OF_CLIENTS; i++) {
		kpb->clients[i].state = KPB_CLIENT_UNREGISTERED;
		kpb->clients[i].r_ptr = NULL;
	}

	/* Register KPB for notification */
	ret = notifier_register(dev, NULL, NOTIFIER_ID_KPB_CLIENT_EVT,
				kpb_event_handler, 0);
	if (ret < 0) {
		kpb_free_history_buffer(kpb->hd.c_hb);
		kpb->hd.c_hb = NULL;
		return -ENOMEM;
	}

	/* Search for KPB related sinks.
	 * NOTE! We assume here that channel selector component device
	 * is connected to the KPB sinks as well as host device.
	 */
	list_for_item(blist, &dev->bsink_list) {
		sink = container_of(blist, struct comp_buffer, source_list);

		if (!sink->sink) {
			ret = -EINVAL;
			break;
		}
		if (dev_comp_type(sink->sink) == SOF_COMP_SELECTOR) {
			/* We found proper real time sink */
			kpb->sel_sink = sink;
		} else if (dev_comp_type(sink->sink) == SOF_COMP_HOST) {
			/* We found proper host sink */
			kpb->host_sink = sink;
		}
	}

	if (!kpb->sel_sink || !kpb->host_sink) {
		comp_info(dev, "kpb_prepare(): could not find sinks: sel_sink %p host_sink %p",
			  kpb->sel_sink, kpb->host_sink);
		ret = -EIO;
	}

	/* Disallow sync_draining_mode for now */
	kpb->sync_draining_mode = false;

	kpb_change_state(kpb, KPB_STATE_RUN);

	return ret;
}

/**
 * \brief Used to pass standard and bespoke commands (with data) to component.
 * \param[in,out] dev - KPB base component device.
 * \param[in] cmd - Command type.
 * \param[in,out] data - Control command data.
 * \return Error code.
 */
static int kpb_cmd(struct comp_dev *dev, int cmd, void *data,
		   int max_data_size)
{
	return 0;
}

/**
 * \brief Resets KPB component.
 * \param[in,out] dev KPB base component device.
 * \return Error code.
 */
static int kpb_reset(struct comp_dev *dev)
{
	struct comp_data *kpb = comp_get_drvdata(dev);
	int ret = 0;

	comp_cl_info(&comp_kpb, "kpb_reset(): resetting from state %d, state log %x",
		     kpb->state, kpb->state_log);

	switch (kpb->state) {
	case KPB_STATE_BUFFERING:
	case KPB_STATE_DRAINING:
		/* KPB is performing some task now,
		 * terminate it gently.
		 */
		kpb_change_state(kpb, KPB_STATE_RESETTING);
		ret = -EBUSY;
		break;
	case KPB_STATE_DISABLED:
	case KPB_STATE_CREATED:
		/* Nothing to reset */
		ret = comp_set_state(dev, COMP_TRIGGER_RESET);
		break;
	default:
		kpb->hd.buffered = 0;

		if (kpb->hd.c_hb) {
			/* Reset history buffer - zero its data, reset pointers
			 * and states.
			 */
			kpb_reset_history_buffer(kpb->hd.c_hb);
		}

		/* Unregister KPB from notifications */
		notifier_unregister(dev, NULL, NOTIFIER_ID_KPB_CLIENT_EVT);
		/* Finally KPB is ready after reset */
		kpb_change_state(kpb, KPB_STATE_PREPARING);

		ret = comp_set_state(dev, COMP_TRIGGER_RESET);
		break;
	}

	return ret;
}

/**
 * \brief Copy real time input stream into sink buffer,
 *	and at the same time buffer that input for
 *	later use by some of the clients.
 *
 * \param[in] dev - kpb component device pointer.
 *
 * \return integer representing either:
 *	0 - success
 *	-EINVAL - failure.
 */
static int kpb_copy(struct comp_dev *dev)
{
	int ret = 0;
	struct comp_data *kpb = comp_get_drvdata(dev);
	struct comp_buffer *source = NULL;
	struct comp_buffer *sink = NULL;
	size_t copy_bytes = 0;
	size_t sample_width = kpb->config.sampling_width;
	uint32_t flags = 0;
	struct draining_data *dd = &kpb->draining_task_data;
	uint32_t avail_bytes;

	comp_dbg(dev, "kpb_copy()");

	/* Get source and sink buffers */
	source = list_first_item(&dev->bsource_list, struct comp_buffer,
				 sink_list);

	if (!source) {
		comp_err(dev, "kpb_copy(): no source.");
		ret = -EINVAL;
		goto out;
	}

	buffer_lock(source, &flags);

	/* Validate source */
	if (!source->stream.r_ptr) {
		comp_err(dev, "kpb_copy(): invalid source pointers.");
		ret = -EINVAL;
		buffer_unlock(source, flags);
		goto out;
	}

	buffer_unlock(source, flags);

	switch (kpb->state) {
	case KPB_STATE_RUN:
		/* In normal RUN state we simply copy to our sink. */
		sink = kpb->sel_sink;
		ret = PPL_STATUS_PATH_STOP;

		if (!sink) {
			comp_err(dev, "kpb_copy(): no sink.");
			ret = -EINVAL;
			goto out;
		}

		buffer_lock(sink, &flags);

		/* Validate sink */
		if (!sink->stream.w_ptr) {
			comp_err(dev, "kpb_copy(): invalid selector sink pointers.");
			ret = -EINVAL;
			buffer_unlock(sink, flags);
			goto out;
		}

		buffer_unlock(sink, flags);

		copy_bytes = audio_stream_get_copy_bytes(&source->stream, &sink->stream);
		if (!copy_bytes) {
			comp_err(dev, "kpb_copy(): nothing to copy sink->free %d source->avail %d",
				 audio_stream_get_free_bytes(&sink->stream),
				 audio_stream_get_avail_bytes(&source->stream));
			ret = PPL_STATUS_PATH_STOP;
			goto out;
		}

		kpb_copy_samples(sink, source, copy_bytes, sample_width);

		/* Buffer source data internally in history buffer for future
		 * use by clients.
		 */
		if (audio_stream_get_avail_bytes(&source->stream) <= kpb->hd.buffer_size) {
			ret = kpb_buffer_data(dev, source, copy_bytes);
			if (ret) {
				comp_err(dev, "kpb_copy(): internal buffering failed.");
				goto out;
			} else {
				ret = PPL_STATUS_PATH_STOP;
			}

			/* Update buffered size. NOTE! We only record buffered
			 * data up to the size of history buffer.
			 */
			kpb->hd.buffered += MIN(kpb->hd.buffer_size -
						kpb->hd.buffered,
						copy_bytes);
		} else {
			comp_err(dev, "kpb_copy(): too much data to buffer.");
		}

		comp_update_buffer_produce(sink, copy_bytes);
		comp_update_buffer_consume(source, copy_bytes);

		break;
	case KPB_STATE_HOST_COPY:
		/* In host copy state we only copy to host buffer. */
		sink = kpb->host_sink;

		if (!sink) {
			comp_err(dev, "kpb_copy(): no sink.");
			ret = -EINVAL;
			goto out;
		}

		buffer_lock(sink, &flags);

		/* Validate sink */
		if (!sink->stream.w_ptr) {
			comp_err(dev, "kpb_copy(): invalid host sink pointers.");
			ret = -EINVAL;
			buffer_unlock(sink, flags);
			goto out;
		}

		buffer_unlock(sink, flags);

		copy_bytes = audio_stream_get_copy_bytes(&source->stream, &sink->stream);
		if (!copy_bytes) {
			comp_err(dev, "kpb_copy(): nothing to copy sink->free %d source->avail %d",
				 audio_stream_get_free_bytes(&sink->stream),
				 audio_stream_get_avail_bytes(&source->stream));
			/* NOTE! We should stop further pipeline copy due to
			 * no data availability, however due to a HW bug
			 * (no HOST DMA IRQs) we need to call host copy
			 * anyway so it can update its pointers.
			 */
			goto out;
		}

		kpb_copy_samples(sink, source, copy_bytes, sample_width);

		comp_update_buffer_produce(sink, copy_bytes);
		comp_update_buffer_consume(source, copy_bytes);

		break;
	case KPB_STATE_INIT_DRAINING:
	case KPB_STATE_DRAINING:
		/* In draining and init draining we only buffer data in
		 * the internal history buffer.
		 */
		avail_bytes = audio_stream_get_avail_bytes(&source->stream);
		copy_bytes = MIN(avail_bytes, kpb->hd.free);
		ret = PPL_STATUS_PATH_STOP;
		if (copy_bytes) {
			buffer_invalidate(source, copy_bytes);
			ret = kpb_buffer_data(dev, source, copy_bytes);
			dd->buffered_while_draining += copy_bytes;
			kpb->hd.free -= copy_bytes;

			if (ret) {
				comp_err(dev, "kpb_copy(): internal buffering failed.");
				goto out;
			}

			comp_update_buffer_consume(source, copy_bytes);
		} else {
			comp_warn(dev, "kpb_copy(): buffering skipped (no data to copy, avail %d, free %d)",
				  audio_stream_get_avail_bytes(&source->stream),
				  kpb->hd.free);
		}

		break;
	default:
		comp_cl_err(&comp_kpb, "kpb_copy(): wrong state (state %d, state log %x)",
			    kpb->state, kpb->state_log);
		ret = -EIO;
		break;
	}

out:
	return ret;
}

/**
 * \brief Buffer a real time data stream in
 *	the internal history buffer.
 *
 * \param[in] dev - KPB component data pointer.
 * \param[in] source - pointer to the buffer source.
 * \param[in] size - requested copy size in bytes.
 *
 * \return 0 on success, error code otherwise.
 */
static int kpb_buffer_data(struct comp_dev *dev,
			   const struct comp_buffer *source, size_t size)
{
	int ret = 0;
	size_t size_to_copy = size;
	size_t space_avail;
	struct comp_data *kpb = comp_get_drvdata(dev);
	struct history_buffer *buff = kpb->hd.c_hb;
	uint32_t offset = 0;
	uint64_t timeout = 0;
	uint64_t current_time;
	enum kpb_state state_preserved = kpb->state;
	size_t sample_width = kpb->config.sampling_width;
	struct timer *timer = timer_get();

	comp_dbg(dev, "kpb_buffer_data()");

	/* We are allowed to buffer data in internal history buffer
	 * only in KPB_STATE_RUN, KPB_STATE_DRAINING or KPB_STATE_INIT_DRAINING
	 * states.
	 */
	if (kpb->state != KPB_STATE_RUN &&
	    kpb->state != KPB_STATE_DRAINING &&
	    kpb->state != KPB_STATE_INIT_DRAINING) {
		comp_err(dev, "kpb_buffer_data(): wrong state! (current state %d, state log %x)",
			 kpb->state, kpb->state_log);
		return PPL_STATUS_PATH_STOP;
	}

	kpb_change_state(kpb, KPB_STATE_BUFFERING);

	timeout = platform_timer_get(timer) +
		  clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1);
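	/* The deadline is one platform-clock millisecond from now; if
	 * buffering has not finished by then, we give up with -ETIME below.
	 */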
	/* Let's store audio stream data in internal history buffer */
	while (size_to_copy) {
		/* Reset was requested, it's time to stop buffering and finish
		 * KPB reset.
		 */
		if (kpb->state == KPB_STATE_RESETTING) {
			kpb_change_state(kpb, KPB_STATE_RESET_FINISHING);
			kpb_reset(dev);
			return PPL_STATUS_PATH_STOP;
		}

		/* Are we stuck in buffering? */
		current_time = platform_timer_get(timer);
		if (timeout < current_time) {
			if (current_time - timeout <= UINT_MAX)
				comp_err(dev,
					 "kpb_buffer_data(): timeout of %u [ms] (current state %d, state log %x)",
					 (unsigned int)(current_time - timeout), kpb->state,
					 kpb->state_log);
			else
				comp_err(dev,
					 "kpb_buffer_data(): timeout > %u [ms] (current state %d, state log %x)",
					 UINT_MAX, kpb->state,
					 kpb->state_log);
			return -ETIME;
		}

		/* Check how much space there is in the current write buffer */
		space_avail = (uintptr_t)buff->end_addr - (uintptr_t)buff->w_ptr;

		if (size_to_copy > space_avail) {
			/* We have more data to copy than available space
			 * in this buffer, copy what's available and continue
			 * with next buffer.
			 */
			kpb_buffer_samples(&source->stream, offset, buff->w_ptr,
					   space_avail, sample_width);
			/* Update write pointer & requested copy size */
			buff->w_ptr = (char *)buff->w_ptr + space_avail;
			size_to_copy = size_to_copy - space_avail;
			/* Update read pointer's offset before continuing
			 * with next buffer.
			 */
			offset += space_avail;
		} else {
			/* Requested size is smaller or equal to the space
			 * available in this buffer. In this scenario simply
			 * copy what was requested.
			 */
			kpb_buffer_samples(&source->stream, offset, buff->w_ptr,
					   size_to_copy, sample_width);
			/* Update write pointer & requested copy size */
			buff->w_ptr = (char *)buff->w_ptr + size_to_copy;
			/* Reset requested copy size */
			size_to_copy = 0;
		}
		/* Have we filled the whole buffer? */
		if (buff->w_ptr == buff->end_addr) {
			/* Reset write pointer back to the beginning
			 * of the buffer.
			 */
			buff->w_ptr = buff->start_addr;
			/* If we have more buffers use them */
			if (buff->next && buff->next != buff) {
				/* Mark current buffer FULL */
				buff->state = KPB_BUFFER_FULL;
				/* Use next buffer available on the list
				 * of buffers.
				 */
				buff = buff->next;
				/* Update also component container,
				 * so next time we enter buffering function
				 * we will know right away what is the current
				 * write buffer
				 */
				kpb->hd.c_hb = buff;
			}
			/* Mark buffer as FREE */
			buff->state = KPB_BUFFER_FREE;
		}
	}

	kpb_change_state(kpb, state_preserved);
	return ret;
}

/**
 * \brief Main event dispatcher.
 * \param[in] arg - KPB component internal data.
 * \param[in] type - notification type
 * \param[in] event_data - event specific data.
 * \return none.
 */
static void kpb_event_handler(void *arg, enum notify_id type, void *event_data)
{
	struct comp_dev *dev = arg;
	struct comp_data *kpb = comp_get_drvdata(dev);
	struct kpb_event_data *evd = event_data;
	struct kpb_client *cli = evd->client_data;

	comp_info(dev, "kpb_event_handler(): received event with ID: %d ",
		  evd->event_id);

	switch (evd->event_id) {
	case KPB_EVENT_REGISTER_CLIENT:
		kpb_register_client(kpb, cli);
		break;
	case KPB_EVENT_UNREGISTER_CLIENT:
		/*TODO*/
		break;
	case KPB_EVENT_BEGIN_DRAINING:
		kpb_init_draining(dev, cli);
		break;
	case KPB_EVENT_STOP_DRAINING:
		/*TODO*/
		break;
	default:
		comp_err(dev, "kpb_event_handler(): unsupported event");
		break;
	}
}

/**
 * \brief Register clients in the system.
 *
 * \param[in] kpb - kpb device component pointer.
 * \param[in] cli - pointer to KPB client's data.
 *
 * \return integer representing either:
 *	0 - success
 *	-EINVAL - failure.
 */
static int kpb_register_client(struct comp_data *kpb, struct kpb_client *cli)
{
	int ret = 0;

	comp_cl_info(&comp_kpb, "kpb_register_client()");

	if (!cli) {
		comp_cl_err(&comp_kpb, "kpb_register_client(): no client data");
		return -EINVAL;
	}
	/* Do we have room for a new client? */
	if (kpb->kpb_no_of_clients >= KPB_MAX_NO_OF_CLIENTS ||
	    cli->id >= KPB_MAX_NO_OF_CLIENTS) {
		comp_cl_err(&comp_kpb, "kpb_register_client(): no free room for client = %u ",
			    cli->id);
		ret = -EINVAL;
	} else if (kpb->clients[cli->id].state != KPB_CLIENT_UNREGISTERED) {
		comp_cl_err(&comp_kpb, "kpb_register_client(): client = %u already registered",
			    cli->id);
		ret = -EINVAL;
	} else {
		/* Client accepted, let's store its data */
		kpb->clients[cli->id].id = cli->id;
		kpb->clients[cli->id].drain_req = cli->drain_req;
		kpb->clients[cli->id].sink = cli->sink;
		kpb->clients[cli->id].r_ptr = NULL;
		kpb->clients[cli->id].state = KPB_CLIENT_BUFFERING;
		kpb->kpb_no_of_clients++;
		ret = 0;
	}

	return ret;
}

/**
 * \brief Prepare history buffer for draining.
 *
 * \param[in] dev - kpb component device pointer.
 * \param[in] cli - client's data.
 *
 */
static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli)
{
	struct comp_data *kpb = comp_get_drvdata(dev);
	bool is_sink_ready = (kpb->host_sink->sink->state == COMP_STATE_ACTIVE);
	size_t sample_width = kpb->config.sampling_width;
	size_t drain_req = cli->drain_req * kpb->config.channels *
			   (kpb->config.sampling_freq / 1000) *
			   (KPB_SAMPLE_CONTAINER_SIZE(sample_width) / 8);
	struct history_buffer *buff = kpb->hd.c_hb;
	struct history_buffer *first_buff = buff;
	size_t buffered = 0;
	size_t local_buffered;
	size_t drain_interval;
	size_t host_period_size = kpb->host_period_size;
	size_t bytes_per_ms = KPB_SAMPLES_PER_MS *
			      (KPB_SAMPLE_CONTAINER_SIZE(sample_width) / 8) *
			      kpb->config.channels;
	size_t period_bytes_limit;
	uint32_t flags;

	comp_info(dev, "kpb_init_draining(): requested draining of %d [ms] from history buffer",
		  cli->drain_req);
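	/* drain_req above converts the client's request from milliseconds to
	 * bytes: ms * channels * frames per ms * bytes per sample. With
	 * hypothetical values of 16 kHz, 2 channels and 16-bit containers, a
	 * 100 ms request maps to 100 * 2 * 16 * 2 = 6400 bytes.
	 */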

	if (kpb->state != KPB_STATE_RUN) {
		comp_err(dev, "kpb_init_draining(): wrong KPB state");
	} else if (cli->id >= KPB_MAX_NO_OF_CLIENTS) {
		comp_err(dev, "kpb_init_draining(): wrong client id");
		/* TODO: check also if client is registered */
	} else if (!is_sink_ready) {
		comp_err(dev, "kpb_init_draining(): sink not ready for draining");
	} else if (kpb->hd.buffered < drain_req ||
		   cli->drain_req > KPB_MAX_DRAINING_REQ) {
		comp_cl_err(&comp_kpb, "kpb_init_draining(): not enough data in history buffer");
	} else {
		/* Draining accepted, find the proper buffer to start reading.
		 * At this point we are guaranteed that there is enough data
		 * in the history buffer. All we have to do now is to calculate
		 * the read pointer from which we will start draining.
		 */
		spin_lock_irq(&kpb->lock, flags);

		kpb_change_state(kpb, KPB_STATE_INIT_DRAINING);

		/* Set history buffer size so new data won't overwrite those
		 * staged for draining.
		 */
		kpb->hd.free = kpb->hd.buffer_size - drain_req;

		/* Find buffer to start draining from */
		do {
			/* Calculate how much data we have stored in
			 * current buffer.
			 */
			buff->r_ptr = buff->start_addr;
			if (buff->state == KPB_BUFFER_FREE) {
				local_buffered = (uintptr_t)buff->w_ptr -
						 (uintptr_t)buff->start_addr;
				buffered += local_buffered;
			} else if (buff->state == KPB_BUFFER_FULL) {
				local_buffered = (uintptr_t)buff->end_addr -
						 (uintptr_t)buff->start_addr;
				buffered += local_buffered;
			} else {
				comp_err(dev, "kpb_init_draining(): incorrect buffer label");
			}
			/* Check if this is already sufficient to start draining;
			 * if not, go to the previous buffer and continue
			 * calculations.
			 */
			if (drain_req > buffered) {
				if (buff->prev == first_buff) {
					/* We went full circle and still don't
					 * have sufficient data for draining.
					 * That means we need to look up the
					 * first buffer again. Our read pointer
					 * is somewhere between write pointer
					 * and buffer's end address.
					 */
					buff = buff->prev;
					buffered += (uintptr_t)buff->end_addr -
						    (uintptr_t)buff->w_ptr;
					buff->r_ptr = (char *)buff->w_ptr +
						      (buffered - drain_req);
					break;
				}
				buff = buff->prev;
			} else if (drain_req == buffered) {
				buff->r_ptr = buff->start_addr;
				break;
			} else {
				buff->r_ptr = (char *)buff->start_addr +
					      (buffered - drain_req);
				break;
			}

		} while (buff != first_buff);

		spin_unlock_irq(&kpb->lock, flags);

		/* Should we drain in synchronized mode (sync_draining_mode)?
		 * Note! We have already verified host params during
		 * kpb_prepare().
		 */
		if (kpb->sync_draining_mode) {
			/* Calculate the time in clock ticks at which each
			 * draining event shall take place. This time will be
			 * used to synchronize us with application interrupts.
			 */
			drain_interval = clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK,
							   host_period_size / bytes_per_ms) /
							   KPB_DRAIN_NUM_OF_PPL_PERIODS_AT_ONCE;
			period_bytes_limit = host_period_size;
			comp_info(dev, "kpb_init_draining(): sync_draining_mode selected with interval %u [uS].",
				  (unsigned int)(drain_interval * 1000 /
				  clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1)));
		} else {
			/* Unlimited draining */
			drain_interval = 0;
			period_bytes_limit = 0;
			comp_info(dev, "kpb_init_draining(): unlimited draining speed selected.");
		}

		comp_info(dev, "kpb_init_draining(), schedule draining task");

		/* Add one-time draining task into the scheduler. */
		kpb->draining_task_data.sink = kpb->host_sink;
		kpb->draining_task_data.hb = buff;
		kpb->draining_task_data.drain_req = drain_req;
		kpb->draining_task_data.sample_width = sample_width;
		kpb->draining_task_data.drain_interval = drain_interval;
		kpb->draining_task_data.pb_limit = period_bytes_limit;
		kpb->draining_task_data.dev = dev;
		kpb->draining_task_data.sync_mode_on = kpb->sync_draining_mode;

		/* Save current sink copy type */
		comp_get_attribute(kpb->host_sink->sink, COMP_ATTR_COPY_TYPE,
				   &kpb->draining_task_data.copy_type);

		if (kpb->force_copy_type != COMP_COPY_INVALID)
			comp_set_attribute(kpb->host_sink->sink, COMP_ATTR_COPY_TYPE,
					   &kpb->force_copy_type);

		/* Pause selector copy. */
		kpb->sel_sink->sink->state = COMP_STATE_PAUSED;

		/* Schedule draining task */
		schedule_task(&kpb->draining_task, 0, 0);
	}
}

/**
 * \brief Draining task.
 *
 * \param[in] arg - pointer keeping draining data previously prepared
 *	by kpb_init_draining().
 *
 * \return task state.
 */
static enum task_state kpb_draining_task(void *arg)
{
	struct draining_data *draining_data = (struct draining_data *)arg;
	struct comp_buffer *sink = draining_data->sink;
	struct history_buffer *buff = draining_data->hb;
	size_t drain_req = draining_data->drain_req;
	size_t sample_width = draining_data->sample_width;
	size_t size_to_read;
	size_t size_to_copy;
	bool move_buffer = false;
	uint32_t drained = 0;
	uint64_t draining_time_start;
	uint64_t draining_time_end;
	uint64_t draining_time_ms;
	uint64_t drain_interval = draining_data->drain_interval;
	uint64_t next_copy_time = 0;
	uint64_t current_time;
	size_t period_bytes = 0;
	size_t period_bytes_limit = draining_data->pb_limit;
	struct timer *timer = timer_get();
	size_t period_copy_start = platform_timer_get(timer);
	size_t time_taken;
	size_t *rt_stream_update = &draining_data->buffered_while_draining;
	struct comp_data *kpb = comp_get_drvdata(draining_data->dev);
	bool sync_mode_on = draining_data->sync_mode_on;
	bool pm_is_active;
	uint32_t flags;

	comp_cl_info(&comp_kpb, "kpb_draining_task(), start.");

	pm_is_active = pm_runtime_is_active(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID);

	if (!pm_is_active)
		pm_runtime_disable(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID);

	/* Change KPB internal state to DRAINING */
	kpb_change_state(kpb, KPB_STATE_DRAINING);

	draining_time_start = platform_timer_get(timer);

	while (drain_req > 0) {
		/* Have we received a reset request? */
		if (kpb->state == KPB_STATE_RESETTING) {
			kpb_change_state(kpb, KPB_STATE_RESET_FINISHING);
			kpb_reset(draining_data->dev);
			goto out;
		}
		/* Are we ready to drain further, or does the host still need
		 * some time to read the data already provided?
		 */
		if (sync_mode_on &&
		    next_copy_time > platform_timer_get(timer)) {
			period_bytes = 0;
			period_copy_start = platform_timer_get(timer);
			continue;
		} else if (next_copy_time == 0) {
			period_copy_start = platform_timer_get(timer);
		}

		size_to_read = (uintptr_t)buff->end_addr - (uintptr_t)buff->r_ptr;

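		/* Choose size_to_copy as the smallest of: data left in the
		 * current history buffer chunk, free space in the sink and
		 * the remaining drain request; consuming the whole remainder
		 * of a chunk also moves draining to the next chunk in the
		 * ring.
		 */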
		if (size_to_read > audio_stream_get_free_bytes(&sink->stream)) {
			if (audio_stream_get_free_bytes(&sink->stream) >= drain_req)
				size_to_copy = drain_req;
			else
				size_to_copy = audio_stream_get_free_bytes(&sink->stream);
		} else {
			if (size_to_read > drain_req) {
				size_to_copy = drain_req;
			} else {
				size_to_copy = size_to_read;
				move_buffer = true;
			}
		}

		kpb_drain_samples(buff->r_ptr, &sink->stream, size_to_copy,
				  sample_width);

		buff->r_ptr = (char *)buff->r_ptr + (uint32_t)size_to_copy;
		drain_req -= size_to_copy;
		drained += size_to_copy;
		period_bytes += size_to_copy;
		kpb->hd.free += MIN(kpb->hd.buffer_size -
				    kpb->hd.free, size_to_copy);

		if (move_buffer) {
			buff->r_ptr = buff->start_addr;
			buff = buff->next;
			move_buffer = false;
		}

		if (size_to_copy) {
			comp_update_buffer_produce(sink, size_to_copy);
			comp_copy(sink->sink);
		} else if (!audio_stream_get_free_bytes(&sink->stream)) {
			/* There is no free space in sink buffer.
			 * Call .copy() on sink component so it can
			 * process its data further.
			 */
			comp_copy(sink->sink);
		}

		if (sync_mode_on && period_bytes >= period_bytes_limit) {
			current_time = platform_timer_get(timer);
			time_taken = current_time - period_copy_start;
			next_copy_time = current_time + drain_interval -
					 time_taken;
		}

		if (drain_req == 0) {
			/* We have finished draining of requested data, however
			 * while we were draining the real time stream could have
			 * provided new data which needs to be copied to the host.
			 */
			comp_cl_info(&comp_kpb, "kpb: update drain_req by %d",
				     *rt_stream_update);
			spin_lock_irq(&kpb->lock, flags);
			drain_req += *rt_stream_update;
			*rt_stream_update = 0;
			if (!drain_req && kpb->state == KPB_STATE_DRAINING) {
				/* Draining is done. Now switch KPB to copy real time
				 * stream to client's sink. This state is called
				 * "draining on demand".
				 * Note! If KPB state changed during draining due to
				 * e.g. a reset request, we should not change that state.
				 */
				kpb_change_state(kpb, KPB_STATE_HOST_COPY);
			}
			spin_unlock_irq(&kpb->lock, flags);
		}
	}

out:
	draining_time_end = platform_timer_get(timer);

	/* Reset host-sink copy mode back to its pre-draining value */
	comp_set_attribute(kpb->host_sink->sink, COMP_ATTR_COPY_TYPE,
			   &kpb->draining_task_data.copy_type);

	draining_time_ms = (draining_time_end - draining_time_start)
			   / clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1);
	if (draining_time_ms <= UINT_MAX)
		comp_cl_info(&comp_kpb, "KPB: kpb_draining_task(), done. %u drained in %u ms",
			     drained, (unsigned int)draining_time_ms);
	else
		comp_cl_info(&comp_kpb, "KPB: kpb_draining_task(), done. %u drained in > %u ms",
			     drained, UINT_MAX);

	return SOF_TASK_STATE_COMPLETED;
}

/**
 * \brief Drain data samples safely, according to configuration.
 *
 * \param[in] source - pointer to source memory (history buffer read pointer).
 * \param[in] sink - pointer to sink stream.
 * \param[in] size - requested copy size in bytes.
 * \param[in] sample_width - sample size.
 *
 * \return none.
 */
static void kpb_drain_samples(void *source, struct audio_stream *sink,
			      size_t size, size_t sample_width)
{
#if CONFIG_FORMAT_S16LE || CONFIG_FORMAT_S24LE || CONFIG_FORMAT_S32LE
	void *dst;
	void *src = source;
	size_t i;
	size_t j = 0;
	size_t channel;
	size_t frames = KPB_BYTES_TO_FRAMES(size, sample_width);
#endif

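	/* Samples are copied one at a time through the stream fragment
	 * helpers, so the sink's circular write pointer wrap is handled by
	 * audio_stream_write_frag_s16()/_s32() rather than by this loop.
	 */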
	switch (sample_width) {
#if CONFIG_FORMAT_S16LE
	case 16:
		for (i = 0; i < frames; i++) {
			for (channel = 0; channel < KPB_NUM_OF_CHANNELS; channel++) {
				dst = audio_stream_write_frag_s16(sink, j);
				*((int16_t *)dst) = *((int16_t *)src);
				src = ((int16_t *)src) + 1;
				j++;
			}
		}
		break;
#endif /* CONFIG_FORMAT_S16LE */
#if CONFIG_FORMAT_S24LE || CONFIG_FORMAT_S32LE
	case 24:
	case 32:
		for (i = 0; i < frames; i++) {
			for (channel = 0; channel < KPB_NUM_OF_CHANNELS; channel++) {
				dst = audio_stream_write_frag_s32(sink, j);
				*((int32_t *)dst) = *((int32_t *)src);
				src = ((int32_t *)src) + 1;
				j++;
			}
		}
		break;
#endif /* CONFIG_FORMAT_S24LE || CONFIG_FORMAT_S32LE */
	default:
		comp_cl_err(&comp_kpb, "KPB: attempt to copy an unsupported format!");
		return;
	}
}

/**
 * \brief Buffer data samples safely, according to configuration.
 * \param[in,out] source Pointer to source buffer.
 * \param[in] start Start offset of source buffer in bytes.
 * \param[in,out] sink Pointer to sink buffer.
 * \param[in] size Requested copy size in bytes.
 * \param[in] sample_width Sample size.
 */
static void kpb_buffer_samples(const struct audio_stream *source,
			       uint32_t start, void *sink, size_t size,
			       size_t sample_width)
{
	void *src;
	void *dst = sink;
	size_t i;
	size_t j = start /
		   (sample_width == 16 ? sizeof(int16_t) : sizeof(int32_t));
	size_t channel;
	size_t frames = KPB_BYTES_TO_FRAMES(size, sample_width);
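	/* j is the source fragment index in samples: the byte offset passed
	 * in by the caller divided by the container size (2 or 4 bytes).
	 */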

	for (i = 0; i < frames; i++) {
		for (channel = 0; channel < KPB_NUM_OF_CHANNELS; channel++) {
			switch (sample_width) {
			case 16:
				src = audio_stream_read_frag_s16(source, j);
				*((int16_t *)dst) = *((int16_t *)src);
				dst = ((int16_t *)dst) + 1;
				break;
#if CONFIG_FORMAT_S24LE || CONFIG_FORMAT_S32LE
			case 24:
			case 32:
				src = audio_stream_read_frag_s32(source, j);
				*((int32_t *)dst) = *((int32_t *)src);
				dst = ((int32_t *)dst) + 1;
				break;
#endif /* CONFIG_FORMAT_S24LE || CONFIG_FORMAT_S32LE */
			default:
				comp_cl_err(&comp_kpb, "KPB: attempt to copy an unsupported format!");
				return;
			}
			j++;
		}
	}
}

/**
 * \brief Initialize history buffer by zeroing its memory.
 * \param[in] buff - pointer to current history buffer.
 *
 * \return: none.
 */
static void kpb_clear_history_buffer(struct history_buffer *buff)
{
	struct history_buffer *first_buff = buff;
	void *start_addr;
	size_t size;

	comp_cl_info(&comp_kpb, "kpb_clear_history_buffer()");

	do {
		start_addr = buff->start_addr;
		size = (uintptr_t)buff->end_addr - (uintptr_t)start_addr;

		bzero(start_addr, size);

		buff = buff->next;
	} while (buff != first_buff);
}

static inline bool kpb_is_sample_width_supported(uint32_t sampling_width)
{
	bool ret;

	switch (sampling_width) {
#if CONFIG_FORMAT_S16LE
	case 16:
		/* FALLTHROUGH */
#endif /* CONFIG_FORMAT_S16LE */
#if CONFIG_FORMAT_S24LE
	case 24:
		/* FALLTHROUGH */
#endif /* CONFIG_FORMAT_S24LE */
#if CONFIG_FORMAT_S32LE
	case 32:
#endif /* CONFIG_FORMAT_S32LE */
		ret = true;
		break;
	default:
		ret = false;
		break;
	}

	return ret;
}

/**
 * \brief Copy data samples safely, according to configuration.
 *
 * \param[in] sink - pointer to sink buffer.
 * \param[in] source - pointer to source buffer.
 * \param[in] size - requested copy size in bytes.
 * \param[in] sample_width - sample size.
 *
 * \return none.
 */
static void kpb_copy_samples(struct comp_buffer *sink,
			     struct comp_buffer *source, size_t size,
			     size_t sample_width)
{
#if CONFIG_FORMAT_S16LE || CONFIG_FORMAT_S24LE || CONFIG_FORMAT_S32LE
	void *dst;
	void *src;
#endif
	size_t i;
	size_t j = 0;
	size_t channel;
	size_t frames = KPB_BYTES_TO_FRAMES(size, sample_width);
	struct audio_stream *istream = &source->stream;
	struct audio_stream *ostream = &sink->stream;

	buffer_invalidate(source, size);

	for (i = 0; i < frames; i++) {
		for (channel = 0; channel < KPB_NUM_OF_CHANNELS; channel++) {
			switch (sample_width) {
#if CONFIG_FORMAT_S16LE
			case 16:
				dst = audio_stream_write_frag_s16(ostream, j);
				src = audio_stream_read_frag_s16(istream, j);
				*((int16_t *)dst) = *((int16_t *)src);
				break;
#endif /* CONFIG_FORMAT_S16LE */
#if CONFIG_FORMAT_S24LE || CONFIG_FORMAT_S32LE
			case 24:
				/* FALLTHROUGH */
			case 32:
				dst = audio_stream_write_frag_s32(ostream, j);
				src = audio_stream_read_frag_s32(istream, j);
				*((int32_t *)dst) = *((int32_t *)src);
				break;
#endif /* CONFIG_FORMAT_S24LE || CONFIG_FORMAT_S32LE */
			default:
				comp_cl_err(&comp_kpb, "KPB: attempt to copy an unsupported format!");
				return;
			}
			j++;
		}
	}

	buffer_writeback(sink, size);
}

/**
 * \brief Reset history buffer.
 * \param[in] buff - pointer to current history buffer.
 *
 * \return none.
 */
static void kpb_reset_history_buffer(struct history_buffer *buff)
{
	struct history_buffer *first_buff = buff;

	comp_cl_info(&comp_kpb, "kpb_reset_history_buffer()");

	if (!buff)
		return;

	kpb_clear_history_buffer(buff);

	do {
		buff->w_ptr = buff->start_addr;
		buff->r_ptr = buff->start_addr;
		buff->state = KPB_BUFFER_FREE;

		buff = buff->next;

	} while (buff != first_buff);
}

static inline bool validate_host_params(struct comp_dev *dev,
					size_t host_period_size,
					size_t host_buffer_size,
					size_t hb_size_req)
{
	/* The aim of this function is to perform a basic check of host params
	 * and reject them if they won't allow for stable draining.
	 * Note however that it is highly recommended for the host buffer to
	 * be at least twice the history buffer size, as this will guarantee
	 * "safe" draining.
	 * By safe we mean no XRUNs (host was unable to read data on time)
	 * and no loss of data due to a delayed host read. The latter
	 * condition is very likely after wake up from a power state like
	 * d0ix.
	 */
	struct comp_data *kpb = comp_get_drvdata(dev);
	size_t sample_width = kpb->config.sampling_width;
	size_t bytes_per_ms = KPB_SAMPLES_PER_MS *
			      (KPB_SAMPLE_CONTAINER_SIZE(sample_width) / 8) *
			      kpb->config.channels;
	size_t pipeline_period_size = (dev->pipeline->period / 1000)
				      * bytes_per_ms;
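	/* Worked example with hypothetical values: at 16 kHz, 2 channels and
	 * 16-bit containers, bytes_per_ms = 16 * 2 * 2 = 64, so a 1000 us
	 * pipeline period yields pipeline_period_size = 64 bytes.
	 */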

	if (!host_period_size || !host_buffer_size) {
		/* Wrong host params */
		comp_err(dev, "kpb: host_period_size (%d) cannot be 0 and host_buffer_size (%d) cannot be 0",
			 host_period_size, host_buffer_size);
		return false;
	} else if (HOST_BUFFER_MIN_SIZE(hb_size_req) >
		   host_buffer_size) {
		/* Host buffer size is too small - history data
		 * may get overwritten.
		 */
		comp_err(dev, "kpb: host_buffer_size (%d) must be at least %d",
			 host_buffer_size, HOST_BUFFER_MIN_SIZE(hb_size_req));
		return false;
	} else if (kpb->sync_draining_mode) {
		/* Sync draining allowed. Check if we can perform draining
		 * with current settings.
		 * In this mode we copy host period size to host
		 * (to avoid overwrite of buffered data by real time stream
		 * this period shall be bigger than pipeline period) and
		 * give host some time to read it. Therefore, in worst
		 * case scenario, we copy one period of real time data + some
		 * of buffered data.
		 */
		if ((host_period_size / KPB_DRAIN_NUM_OF_PPL_PERIODS_AT_ONCE) <
		    pipeline_period_size) {
			comp_err(dev, "kpb: host_period_size (%d) must be at least %d * %d",
				 host_period_size,
				 KPB_DRAIN_NUM_OF_PPL_PERIODS_AT_ONCE,
				 pipeline_period_size);
			return false;
		}
	}

	return true;
}

/**
 * \brief Change KPB state and log this change internally.
 * \param[in] kpb - KPB component data pointer.
 * \param[in] state - new KPB state.
 *
 * \return none.
 */
static inline void kpb_change_state(struct comp_data *kpb,
				    enum kpb_state state)
{
	comp_cl_dbg(&comp_kpb, "kpb_change_state(): from %d to %d",
		    kpb->state, state);
	kpb->state = state;
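	/* Each state occupies one nibble of state_log, so the log keeps a
	 * trace of the last eight states with the newest in the least
	 * significant nibble.
	 */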
	kpb->state_log = (kpb->state_log << 4) | state;
}

static const struct comp_driver comp_kpb = {
	.type = SOF_COMP_KPB,
	.uid = SOF_RT_UUID(kpb_uuid),
	.tctx = &kpb_tr,
	.ops = {
		.create = kpb_new,
		.free = kpb_free,
		.cmd = kpb_cmd,
		.trigger = kpb_trigger,
		.copy = kpb_copy,
		.prepare = kpb_prepare,
		.reset = kpb_reset,
		.params = kpb_params,
	},
};

static SHARED_DATA struct comp_driver_info comp_kpb_info = {
	.drv = &comp_kpb,
};

UT_STATIC void sys_comp_kpb_init(void)
{
	comp_register(platform_shared_get(&comp_kpb_info,
					  sizeof(comp_kpb_info)));
}

DECLARE_MODULE(sys_comp_kpb_init);