// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//

#include <linux/debugfs.h>
#include <linux/sched/signal.h>
#include "sof-priv.h"
#include "ops.h"

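/*
 * Return the number of trace bytes that can be read from local position
 * @pos, given the DSP write position in sdev->host_offset. When the write
 * pointer has wrapped around the circular host DMA buffer, only the data
 * up to the end of the buffer is reported.
 */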
static size_t sof_trace_avail(struct snd_sof_dev *sdev,
			      loff_t pos, size_t buffer_size)
{
	loff_t host_offset = READ_ONCE(sdev->host_offset);

	/*
	 * If the host offset is less than the local pos, the write pointer
	 * of the host DMA buffer has wrapped, so the trace data at the end
	 * of the host DMA buffer must be read out first.
	 */
	if (host_offset < pos)
		return buffer_size - pos;

	/* If there is available trace data now, it is unnecessary to wait. */
	if (host_offset > pos)
		return host_offset - pos;

	return 0;
}

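/*
 * Wait until the firmware makes new trace data available, the trace has
 * stopped and drained, or the caller is interrupted by a signal. Returns
 * the number of readable bytes, or 0 on end of file.
 */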
static size_t sof_wait_trace_avail(struct snd_sof_dev *sdev,
				   loff_t pos, size_t buffer_size)
{
	wait_queue_entry_t wait;
	size_t ret = sof_trace_avail(sdev, pos, buffer_size);

	/* data immediately available */
	if (ret)
		return ret;

	if (!sdev->dtrace_is_enabled && sdev->dtrace_draining) {
		/*
		 * tracing has ended and all traces have been
		 * read by client, return EOF
		 */
		sdev->dtrace_draining = false;
		return 0;
	}

	/* wait for available trace data from FW */
	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&sdev->trace_sleep, &wait);

	if (!signal_pending(current)) {
		/* set timeout to max value, no error code */
		schedule_timeout(MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&sdev->trace_sleep, &wait);

	return sof_trace_avail(sdev, pos, buffer_size);
}

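/*
 * debugfs read() handler for the "trace" entry. The host DMA buffer is
 * treated as a circular buffer: the file position is reduced modulo the
 * buffer size, the call waits for data from the firmware and then copies
 * at most @count available bytes to user space.
 */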
static ssize_t sof_dfsentry_trace_read(struct file *file, char __user *buffer,
				       size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	unsigned long rem;
	loff_t lpos = *ppos;
	size_t avail, buffer_size = dfse->size;
	u64 lpos_64;

	/* make sure we know about any failures on the DSP side */
	sdev->dtrace_error = false;

	/* check pos and count */
	if (lpos < 0)
		return -EINVAL;
	if (!count)
		return 0;

	/* check for buffer wrap and count overflow */
	lpos_64 = lpos;
	lpos = do_div(lpos_64, buffer_size);

	if (count > buffer_size - lpos) /* min() not used to avoid sparse warnings */
		count = buffer_size - lpos;

	/* get available count based on current host offset */
	avail = sof_wait_trace_avail(sdev, lpos, buffer_size);
	if (sdev->dtrace_error) {
		dev_err(sdev->dev, "error: trace IO error\n");
		return -EIO;
	}

	/* make sure count is <= avail */
	count = avail > count ? count : avail;

	/* copy available trace data to user space */
	rem = copy_to_user(buffer, ((u8 *)(dfse->buf) + lpos), count);
	if (rem)
		return -EFAULT;

	/* move debugfs reading position */
	*ppos += count;

	return count;
}

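/*
 * debugfs release() handler: once tracing has stopped, reset the host
 * offset so that the next reader does not see the old trace data again.
 */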
static int sof_dfsentry_trace_release(struct inode *inode, struct file *file)
{
	struct snd_sof_dfsentry *dfse = inode->i_private;
	struct snd_sof_dev *sdev = dfse->sdev;

	/* avoid duplicate traces at next open */
	if (!sdev->dtrace_is_enabled)
		sdev->host_offset = 0;

	return 0;
}

static const struct file_operations sof_dfs_trace_fops = {
	.open = simple_open,
	.read = sof_dfsentry_trace_read,
	.llseek = default_llseek,
	.release = sof_dfsentry_trace_release,
};

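/* expose the trace DMA buffer as a read-only "trace" debugfs entry */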
static int trace_debugfs_create(struct snd_sof_dev *sdev)
{
	struct snd_sof_dfsentry *dfse;

	if (!sdev)
		return -EINVAL;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->type = SOF_DFSENTRY_TYPE_BUF;
	dfse->buf = sdev->dmatb.area;
	dfse->size = sdev->dmatb.bytes;
	dfse->sdev = sdev;

	debugfs_create_file("trace", 0444, sdev->debugfs_root, dfse,
			    &sof_dfs_trace_fops);

	return 0;
}

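/*
 * Configure and start DMA trace on the DSP: claim a host DMA stream, send
 * the trace buffer parameters to the firmware over IPC (using the extended
 * format on ABI 3.7.0 and later) and trigger the trace stream.
 */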
int snd_sof_init_trace_ipc(struct snd_sof_dev *sdev)
{
	struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
	struct sof_ipc_fw_version *v = &ready->version;
	struct sof_ipc_dma_trace_params_ext params;
	struct sof_ipc_reply ipc_reply;
	int ret;

	if (!sdev->dtrace_is_supported)
		return 0;

	if (sdev->dtrace_is_enabled || !sdev->dma_trace_pages)
		return -EINVAL;

	/* set IPC parameters */
	params.hdr.cmd = SOF_IPC_GLB_TRACE_MSG;
	/* PARAMS_EXT is only supported from ABI 3.7.0 onwards */
	if (v->abi_version >= SOF_ABI_VER(3, 7, 0)) {
		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params_ext);
		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS_EXT;
		params.timestamp_ns = ktime_get(); /* in nanoseconds */
	} else {
		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params);
		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS;
	}
	params.buffer.phy_addr = sdev->dmatp.addr;
	params.buffer.size = sdev->dmatb.bytes;
	params.buffer.pages = sdev->dma_trace_pages;
	params.stream_tag = 0;

	sdev->host_offset = 0;
	sdev->dtrace_draining = false;

	ret = snd_sof_dma_trace_init(sdev, &params.stream_tag);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: snd_sof_dma_trace_init failed: %d\n", ret);
		return ret;
	}
	dev_dbg(sdev->dev, "stream_tag: %d\n", params.stream_tag);

	/* send IPC to the DSP */
	ret = sof_ipc_tx_message(sdev->ipc,
				 params.hdr.cmd, &params, sizeof(params),
				 &ipc_reply, sizeof(ipc_reply));
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: can't set DMA params for trace: %d\n", ret);
		goto trace_release;
	}

	ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: snd_sof_dma_trace_trigger: start: %d\n", ret);
		goto trace_release;
	}

	sdev->dtrace_is_enabled = true;

	return 0;

trace_release:
	snd_sof_dma_trace_release(sdev);
	return ret;
}

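/*
 * Allocate the trace page table and data buffers, build the compressed page
 * table for the firmware, create the debugfs entry on first boot and start
 * tracing via snd_sof_init_trace_ipc().
 */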
int snd_sof_init_trace(struct snd_sof_dev *sdev)
{
	int ret;

	if (!sdev->dtrace_is_supported)
		return 0;

	/* clear the enable flag before initialization starts */
	sdev->dtrace_is_enabled = false;

	/* allocate trace page table buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
				  PAGE_SIZE, &sdev->dmatp);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: can't alloc page table for trace: %d\n", ret);
		return ret;
	}

	/* allocate trace data buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
				  DMA_BUF_SIZE_FOR_TRACE, &sdev->dmatb);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: can't alloc buffer for trace: %d\n", ret);
		goto page_err;
	}

	/* create compressed page table for audio firmware */
	ret = snd_sof_create_page_table(sdev->dev, &sdev->dmatb,
					sdev->dmatp.area, sdev->dmatb.bytes);
	if (ret < 0)
		goto table_err;

	sdev->dma_trace_pages = ret;
	dev_dbg(sdev->dev, "dma_trace_pages: %d\n", sdev->dma_trace_pages);

	if (sdev->first_boot) {
		ret = trace_debugfs_create(sdev);
		if (ret < 0)
			goto table_err;
	}

	init_waitqueue_head(&sdev->trace_sleep);

	ret = snd_sof_init_trace_ipc(sdev);
	if (ret < 0)
		goto table_err;

	return 0;
table_err:
	sdev->dma_trace_pages = 0;
	snd_dma_free_pages(&sdev->dmatb);
page_err:
	snd_dma_free_pages(&sdev->dmatp);
	return ret;
}
EXPORT_SYMBOL(snd_sof_init_trace);

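/*
 * IPC handler for DMA trace position updates from the DSP: record the new
 * host write offset, wake up any waiting readers and report any buffer
 * overflow signalled by the firmware.
 */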
int snd_sof_trace_update_pos(struct snd_sof_dev *sdev,
			     struct sof_ipc_dma_trace_posn *posn)
{
	if (!sdev->dtrace_is_supported)
		return 0;

	if (sdev->dtrace_is_enabled && sdev->host_offset != posn->host_offset) {
		sdev->host_offset = posn->host_offset;
		wake_up(&sdev->trace_sleep);
	}

	if (posn->overflow != 0)
		dev_err(sdev->dev,
			"error: DSP trace buffer overflow %u bytes. Total messages %d\n",
			posn->overflow, posn->messages);

	return 0;
}

/* an error has occurred within the DSP that prevents further trace */
void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev)
{
	if (!sdev->dtrace_is_supported)
		return;

	if (sdev->dtrace_is_enabled) {
		dev_err(sdev->dev, "error: waking up any trace sleepers\n");
		sdev->dtrace_error = true;
		wake_up(&sdev->trace_sleep);
	}
}
EXPORT_SYMBOL(snd_sof_trace_notify_for_error);

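/*
 * Stop the DMA trace stream and release its DMA resources, then mark the
 * trace as draining so readers can consume the remaining data before EOF.
 */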
void snd_sof_release_trace(struct snd_sof_dev *sdev)
{
	int ret;

	if (!sdev->dtrace_is_supported || !sdev->dtrace_is_enabled)
		return;

	ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
	if (ret < 0)
		dev_err(sdev->dev,
			"error: snd_sof_dma_trace_trigger: stop: %d\n", ret);

	ret = snd_sof_dma_trace_release(sdev);
	if (ret < 0)
		dev_err(sdev->dev,
			"error: snd_sof_dma_trace_release failed: %d\n", ret);

	sdev->dtrace_is_enabled = false;
	sdev->dtrace_draining = true;
	wake_up(&sdev->trace_sleep);
}
EXPORT_SYMBOL(snd_sof_release_trace);

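/* stop tracing and free the trace DMA buffers */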
void snd_sof_free_trace(struct snd_sof_dev *sdev)
{
	if (!sdev->dtrace_is_supported)
		return;

	snd_sof_release_trace(sdev);

	if (sdev->dma_trace_pages) {
		snd_dma_free_pages(&sdev->dmatb);
		snd_dma_free_pages(&sdev->dmatp);
		sdev->dma_trace_pages = 0;
	}
}
EXPORT_SYMBOL(snd_sof_free_trace);