1 // SPDX-License-Identifier: BSD-3-Clause
2 //
3 // Copyright(c) 2016 Intel Corporation. All rights reserved.
4 //
5 // Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
6 // Keyon Jie <yang.jie@linux.intel.com>
7
8 #include <sof/audio/buffer.h>
9 #include <sof/audio/component.h>
10 #include <sof/common.h>
11 #include <rtos/interrupt.h>
12 #include <rtos/alloc.h>
13 #include <rtos/cache.h>
14 #include <sof/lib/memory.h>
15 #include <sof/lib/notifier.h>
16 #include <sof/list.h>
17 #include <rtos/spinlock.h>
18 #include <ipc/topology.h>
19 #include <errno.h>
20 #include <stddef.h>
21 #include <stdint.h>
22
23 LOG_MODULE_REGISTER(buffer, CONFIG_SOF_LOG_LEVEL);
24
25 /* 42544c92-8e92-4e41-b679-34519f1c1d28 */
26 DECLARE_SOF_RT_UUID("buffer", buffer_uuid, 0x42544c92, 0x8e92, 0x4e41,
27 0xb6, 0x79, 0x34, 0x51, 0x9f, 0x1c, 0x1d, 0x28);
28 DECLARE_TR_CTX(buffer_tr, SOF_UUID(buffer_uuid), LOG_LEVEL_INFO);
29
/*
 * Allocate a component buffer object plus its separate audio payload area.
 *
 * @param size  Payload size in bytes; must be non-zero.
 * @param caps  Memory capability flags for the payload allocation.
 * @param align Alignment in bytes for the payload allocation.
 * @return Uncached pointer to the new buffer, or NULL on failure.
 *
 * The control structure comes from the coherent API; the payload comes from
 * rballoc_align(). On payload allocation failure the structure is freed again,
 * so no partial object ever escapes.
 */
struct comp_buffer *buffer_alloc(uint32_t size, uint32_t caps, uint32_t align)
{
	struct comp_buffer *buffer;
	struct comp_buffer __sparse_cache *buffer_c;

	tr_dbg(&buffer_tr, "buffer_alloc()");

	/* validate request */
	if (size == 0) {
		tr_err(&buffer_tr, "buffer_alloc(): new size = %u is invalid",
		       size);
		return NULL;
	}

	/*
	 * allocate new buffer, align the allocation size to a cache line for
	 * the coherent API
	 */
	buffer = coherent_init_thread(struct comp_buffer, c);
	if (!buffer) {
		tr_err(&buffer_tr, "buffer_alloc(): could not alloc structure");
		return NULL;
	}

	/* the audio payload is a separate allocation from the control struct */
	buffer->stream.addr = rballoc_align(0, caps, size, align);
	if (!buffer->stream.addr) {
		rfree(buffer);
		tr_err(&buffer_tr, "buffer_alloc(): could not alloc size = %u bytes of type = %u",
		       size, caps);
		return NULL;
	}

	/* list headers are initialized via the uncached pointer on purpose */
	list_init(&buffer->source_list);
	list_init(&buffer->sink_list);

	/* From here no more uncached access to the buffer object, except its list headers */
	buffer_c = buffer_acquire(buffer);
	buffer_init(buffer_c, size, caps);
	buffer_release(buffer_c);

	/*
	 * The buffer hasn't yet been marked as shared, hence buffer_release()
	 * hasn't written back and invalidated the cache. Therefore we have to
	 * do this manually now before adding to the lists. Buffer list
	 * structures are always accessed uncached and they're never modified at
	 * run-time, i.e. buffers are never relinked. So we have to make sure,
	 * that what we have written into buffer's cache is in RAM before
	 * modifying that RAM bypassing cache, and that after this cache is
	 * re-loaded again.
	 */
	dcache_writeback_invalidate_region(uncache_to_cache(buffer), sizeof(*buffer));

	return buffer;
}
84
/*
 * Zero the whole payload area of a buffer. For DMA-capable buffers the
 * zeroed region is also written back from the data cache so the change
 * is visible in RAM.
 */
void buffer_zero(struct comp_buffer __sparse_cache *buffer)
{
	void *data = buffer->stream.addr;
	uint32_t bytes = buffer->stream.size;

	buf_dbg(buffer, "stream_zero()");

	bzero(data, bytes);
	if (buffer->caps & SOF_MEM_CAPS_DMA)
		dcache_writeback_region((__sparse_force void __sparse_cache *)data,
					bytes);
}
94
/*
 * Resize the payload storage of a buffer.
 *
 * @param buffer Cached buffer pointer.
 * @param size   New payload size in bytes; must be non-zero.
 * @return 0 on success, -EINVAL for a zero size, -ENOMEM when growing fails.
 *
 * Shrinking never fails: if rbrealloc() cannot provide a chunk, the old
 * (larger) chunk is reused and only the accounted size is reduced.
 * SOF_MEM_FLAG_NO_COPY means previous contents are not preserved.
 */
int buffer_set_size(struct comp_buffer __sparse_cache *buffer, uint32_t size)
{
	void *new_ptr = NULL;

	/* validate request */
	if (size == 0) {
		buf_err(buffer, "resize size = %u is invalid", size);
		return -EINVAL;
	}

	/* nothing to do when the size is unchanged */
	if (size == buffer->stream.size)
		return 0;

	new_ptr = rbrealloc(buffer->stream.addr, SOF_MEM_FLAG_NO_COPY,
			    buffer->caps, size, buffer->stream.size);

	/* we couldn't allocate bigger chunk */
	if (!new_ptr && size > buffer->stream.size) {
		/*
		 * Report the requested size: the original logged
		 * buffer->stream.size here, i.e. the old size, which made the
		 * "can't alloc %u bytes" message misleading.
		 */
		buf_err(buffer, "resize can't alloc %u bytes type %u",
			size, buffer->caps);
		return -ENOMEM;
	}

	/* use bigger chunk, else just use the old chunk but set smaller */
	if (new_ptr)
		buffer->stream.addr = new_ptr;

	buffer_init(buffer, size, buffer->caps);

	return 0;
}
126
/*
 * Apply IPC stream parameters to a buffer.
 *
 * @param buffer       Cached buffer pointer.
 * @param params       Stream parameters; must not be NULL.
 * @param force_update Re-apply even if hw params were already configured.
 * @return 0 on success or when already configured, -EINVAL on bad input or
 *         when audio_stream_set_params() rejects the parameters.
 */
int buffer_set_params(struct comp_buffer __sparse_cache *buffer,
		      struct sof_ipc_stream_params *params, bool force_update)
{
	int err;
	int ch;

	if (!params) {
		buf_err(buffer, "buffer_set_params(): !params");
		return -EINVAL;
	}

	/* keep the existing configuration unless an update is forced */
	if (buffer->hw_params_configured && !force_update)
		return 0;

	err = audio_stream_set_params(&buffer->stream, params);
	if (err < 0) {
		buf_err(buffer, "buffer_set_params(): audio_stream_set_params failed");
		return -EINVAL;
	}

	/* copy format and the full channel map */
	buffer->buffer_fmt = params->buffer_fmt;
	for (ch = 0; ch < SOF_IPC_MAX_CHANNELS; ch++)
		buffer->chmap[ch] = params->chmap[ch];

	buffer->hw_params_configured = true;

	return 0;
}
155
buffer_params_match(struct comp_buffer __sparse_cache * buffer,struct sof_ipc_stream_params * params,uint32_t flag)156 bool buffer_params_match(struct comp_buffer __sparse_cache *buffer,
157 struct sof_ipc_stream_params *params, uint32_t flag)
158 {
159 assert(params);
160
161 if ((flag & BUFF_PARAMS_FRAME_FMT) &&
162 buffer->stream.frame_fmt != params->frame_fmt)
163 return false;
164
165 if ((flag & BUFF_PARAMS_RATE) &&
166 buffer->stream.rate != params->rate)
167 return false;
168
169 if ((flag & BUFF_PARAMS_CHANNELS) &&
170 buffer->stream.channels != params->channels)
171 return false;
172
173 return true;
174 }
175
176 /* free component in the pipeline */
buffer_free(struct comp_buffer * buffer)177 void buffer_free(struct comp_buffer *buffer)
178 {
179 struct buffer_cb_free cb_data = {
180 .buffer = buffer,
181 };
182
183 if (!buffer)
184 return;
185
186 buf_dbg(buffer, "buffer_free()");
187
188 notifier_event(buffer, NOTIFIER_ID_BUFFER_FREE,
189 NOTIFIER_TARGET_CORE_LOCAL, &cb_data, sizeof(cb_data));
190
191 /* In case some listeners didn't unregister from buffer's callbacks */
192 notifier_unregister_all(NULL, buffer);
193
194 rfree(buffer->stream.addr);
195 coherent_free_thread(buffer, c);
196 }
197
198 /*
199 * comp_update_buffer_produce() and comp_update_buffer_consume() send
200 * NOTIFIER_ID_BUFFER_PRODUCE and NOTIFIER_ID_BUFFER_CONSUME notifier events
201 * respectively. The only recipient of those notifications is probes. The
202 * target for those notifications is always the current core, therefore notifier
203 * callbacks will be called synchronously from notifier_event() calls. Therefore
204 * we cannot pass unlocked buffer pointers to probes, because if they try to
205 * acquire the buffer, that can cause a deadlock. In general locked objects
206 * shouldn't be passed to potentially asynchronous contexts, but here we have no
207 * choice but to use our knowledge of the local notifier behaviour and pass
208 * locked buffers to notification recipients.
209 */
/*
 * Commit @bytes newly written data to the buffer and notify local listeners.
 *
 * @param buffer Cached (acquired) buffer pointer — see the comment above on
 *               why a locked buffer may be passed to the notifier here.
 * @param bytes  Number of bytes produced; 0 is a no-op (logged only).
 */
void comp_update_buffer_produce(struct comp_buffer __sparse_cache *buffer, uint32_t bytes)
{
	/* snapshot w_ptr BEFORE audio_stream_produce() advances it, so the
	 * notification carries the start address of the new data
	 */
	struct buffer_cb_transact cb_data = {
		.buffer = buffer,
		.transaction_amount = bytes,
		.transaction_begin_address = buffer->stream.w_ptr,
	};

	/* return if no bytes */
	if (!bytes) {
		buf_dbg(buffer, "comp_update_buffer_produce(), no bytes to produce, source->comp.id = %u, source->comp.type = %u, sink->comp.id = %u, sink->comp.type = %u",
			buffer->source ? dev_comp_id(buffer->source) : (unsigned int)UINT32_MAX,
			buffer->source ? dev_comp_type(buffer->source) : (unsigned int)UINT32_MAX,
			buffer->sink ? dev_comp_id(buffer->sink) : (unsigned int)UINT32_MAX,
			buffer->sink ? dev_comp_type(buffer->sink) : (unsigned int)UINT32_MAX);
		return;
	}

	audio_stream_produce(&buffer->stream, bytes);

	/* Notifier looks for the pointer value to match it against registration */
	notifier_event(cache_to_uncache(buffer), NOTIFIER_ID_BUFFER_PRODUCE,
		       NOTIFIER_TARGET_CORE_LOCAL, &cb_data, sizeof(cb_data));

	buf_dbg(buffer, "comp_update_buffer_produce(), ((buffer->avail << 16) | buffer->free) = %08x, ((buffer->id << 16) | buffer->size) = %08x",
		(audio_stream_get_avail_bytes(&buffer->stream) << 16) |
		 audio_stream_get_free_bytes(&buffer->stream),
		(buffer->id << 16) | buffer->stream.size);
	buf_dbg(buffer, "comp_update_buffer_produce(), ((buffer->r_ptr - buffer->addr) << 16 | (buffer->w_ptr - buffer->addr)) = %08x",
		((char *)buffer->stream.r_ptr - (char *)buffer->stream.addr) << 16 |
		((char *)buffer->stream.w_ptr - (char *)buffer->stream.addr));
}
242
/*
 * Commit @bytes consumed data from the buffer and notify local listeners.
 *
 * @param buffer Cached (acquired) buffer pointer — see the comment above
 *               comp_update_buffer_produce() on the locking caveat.
 * @param bytes  Number of bytes consumed; 0 is a no-op (logged only).
 */
void comp_update_buffer_consume(struct comp_buffer __sparse_cache *buffer, uint32_t bytes)
{
	/* snapshot r_ptr BEFORE audio_stream_consume() advances it, so the
	 * notification carries the start address of the consumed data
	 */
	struct buffer_cb_transact cb_data = {
		.buffer = buffer,
		.transaction_amount = bytes,
		.transaction_begin_address = buffer->stream.r_ptr,
	};

	/* return if no bytes */
	if (!bytes) {
		buf_dbg(buffer, "comp_update_buffer_consume(), no bytes to consume, source->comp.id = %u, source->comp.type = %u, sink->comp.id = %u, sink->comp.type = %u",
			buffer->source ? dev_comp_id(buffer->source) : (unsigned int)UINT32_MAX,
			buffer->source ? dev_comp_type(buffer->source) : (unsigned int)UINT32_MAX,
			buffer->sink ? dev_comp_id(buffer->sink) : (unsigned int)UINT32_MAX,
			buffer->sink ? dev_comp_type(buffer->sink) : (unsigned int)UINT32_MAX);
		return;
	}

	audio_stream_consume(&buffer->stream, bytes);

	/* notifier matches listeners by the uncached pointer value */
	notifier_event(cache_to_uncache(buffer), NOTIFIER_ID_BUFFER_CONSUME,
		       NOTIFIER_TARGET_CORE_LOCAL, &cb_data, sizeof(cb_data));

	buf_dbg(buffer, "comp_update_buffer_consume(), (buffer->avail << 16) | buffer->free = %08x, (buffer->id << 16) | buffer->size = %08x, (buffer->r_ptr - buffer->addr) << 16 | (buffer->w_ptr - buffer->addr)) = %08x",
		(audio_stream_get_avail_bytes(&buffer->stream) << 16) |
		 audio_stream_get_free_bytes(&buffer->stream),
		(buffer->id << 16) | buffer->stream.size,
		((char *)buffer->stream.r_ptr - (char *)buffer->stream.addr) << 16 |
		((char *)buffer->stream.w_ptr - (char *)buffer->stream.addr));
}
273
/*
 * Link a buffer into a component's buffer list for direction @dir.
 *
 * @param buffer Uncached buffer pointer.
 * @param head   Uncached list head to prepend to.
 * @param dir    Direction selecting which of the buffer's list links to use
 *               (passed through to buffer_comp_list()).
 *
 * The writeback/invalidate pair around list_item_prepend() keeps the first
 * existing list entry's header consistent between its cached and uncached
 * aliases while we modify it via the uncached alias.
 */
void buffer_attach(struct comp_buffer *buffer, struct list_item *head, int dir)
{
	struct list_item __sparse_cache *needs_sync;
	bool further_buffers_exist;

	/*
	 * There can already be buffers on the target list. If we just link this
	 * buffer, we modify the first buffer's list header via uncached alias,
	 * so its cached copy can later be written back, overwriting the
	 * modified header. FIXME: this is still a problem with different cores.
	 */
	further_buffers_exist = !list_is_empty(head);
	needs_sync = uncache_to_cache(head->next);
	if (further_buffers_exist)
		dcache_writeback_region(needs_sync, sizeof(struct list_item));
	/* The cache line can be prefetched here, invalidate it after prepending */
	list_item_prepend(buffer_comp_list(buffer, dir), head);
	if (further_buffers_exist)
		dcache_invalidate_region(needs_sync, sizeof(struct list_item));
}
294
/*
 * Unlink a buffer from a component's buffer list for direction @dir.
 *
 * @param buffer Uncached buffer pointer.
 * @param head   Uncached list head the buffer is currently linked on.
 * @param dir    Direction selecting which of the buffer's list links to use
 *               (passed through to buffer_comp_list()).
 *
 * Mirror image of buffer_attach(): both neighbours' list headers are
 * written back before and invalidated after list_item_del(), because the
 * unlink modifies them via the uncached alias.
 */
void buffer_detach(struct comp_buffer *buffer, struct list_item *head, int dir)
{
	struct list_item __sparse_cache *needs_sync_prev, *needs_sync_next;
	bool buffers_after_exist, buffers_before_exist;
	struct list_item *buf_list = buffer_comp_list(buffer, dir);

	/*
	 * There can be more buffers linked together with this one, that will
	 * still be staying on their respective pipelines and might get used via
	 * their cached aliases. If we just unlink this buffer, we modify their
	 * list header via uncached alias, so their cached copy can later be
	 * written back, overwriting the modified header. FIXME: this is still a
	 * problem with different cores.
	 */
	buffers_after_exist = head != buf_list->next;
	buffers_before_exist = head != buf_list->prev;
	needs_sync_prev = uncache_to_cache(buf_list->prev);
	needs_sync_next = uncache_to_cache(buf_list->next);
	if (buffers_after_exist)
		dcache_writeback_region(needs_sync_next, sizeof(struct list_item));
	if (buffers_before_exist)
		dcache_writeback_region(needs_sync_prev, sizeof(struct list_item));
	/* buffers before or after can be prefetched here */
	list_item_del(buf_list);
	if (buffers_after_exist)
		dcache_invalidate_region(needs_sync_next, sizeof(struct list_item));
	if (buffers_before_exist)
		dcache_invalidate_region(needs_sync_prev, sizeof(struct list_item));
}
324