/*
 * Copyright (c) 2022 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @brief Intel ADSP HDA DMA (Stream) driver
 *
 * From the DSP's point of view, HDA is effectively a ring buffer (FIFO) where
 * the read and write positions are maintained by the hardware, and software
 * commits reads or writes by writing the length of the read or write to
 * another register (DGBFPI).
 *
 * It's important that the software knows the position in the ring buffer to
 * read or write from. It's also important that the buffer be placed in the
 * correct memory region and aligned to 128 bytes. Lastly, it's important that
 * the host and DSP coordinate the order in which operations take place. With
 * all of that done, HDA streams are a fantastic bit of hardware and do their
 * job well.
 *
 * There are four types of streams, with a set of each available for
 * communicating to or from the Host or Link. Each stream set is
 * unidirectional.
 */
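
/*
 * Illustrative usage sketch (not part of this driver, a minimal example under
 * assumptions): a client drives one of these streams through the generic
 * Zephyr DMA API. The device label "hda_host_in", the channel number and the
 * buffer sizing below are placeholders.
 *
 *   const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(hda_host_in));
 *   static uint8_t buf[512] __aligned(128);      // 128-byte aligned buffer
 *   uint32_t chan = 0;                           // e.g. from dma_request_channel()
 *   struct dma_block_config blk = {
 *           .source_address = (uintptr_t)buf,
 *           .block_size = sizeof(buf),
 *   };
 *   struct dma_config cfg = {
 *           .channel_direction = MEMORY_TO_HOST, // host in: DSP memory -> host
 *           .block_count = 1,
 *           .source_data_size = 4,
 *           .head_block = &blk,
 *   };
 *
 *   dma_config(dma, chan, &cfg);
 *   dma_start(dma, chan);
 *   // ... produce bytes_written bytes into buf ...
 *   dma_reload(dma, chan, 0, 0, bytes_written);  // commit the written length
 *   dma_stop(dma, chan);
 */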

#include <zephyr/drivers/dma.h>

#include "dma_intel_adsp_hda.h"
#include <intel_adsp_hda.h>

int intel_adsp_hda_dma_host_in_config(const struct device *dev,
				      uint32_t channel,
				      struct dma_config *dma_cfg)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;
	struct dma_block_config *blk_cfg;
	uint8_t *buf;
	int res;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");
	__ASSERT(dma_cfg->block_count == 1,
		 "HDA does not support scatter gather or chained "
		 "block transfers.");
	__ASSERT(dma_cfg->channel_direction == cfg->direction,
		 "Unexpected channel direction, HDA host in supports "
		 "MEMORY_TO_HOST");

	blk_cfg = dma_cfg->head_block;
	buf = (uint8_t *)(uintptr_t)(blk_cfg->source_address);
	res = intel_adsp_hda_set_buffer(cfg->base, cfg->regblock_size, channel, buf,
					blk_cfg->block_size);

	if (res == 0) {
		*DGMBS(cfg->base, cfg->regblock_size, channel) =
			blk_cfg->block_size & HDA_ALIGN_MASK;

		intel_adsp_hda_set_sample_container_size(cfg->base, cfg->regblock_size, channel,
							 dma_cfg->source_data_size);
	}

	return res;
}


int intel_adsp_hda_dma_host_out_config(const struct device *dev,
				       uint32_t channel,
				       struct dma_config *dma_cfg)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;
	struct dma_block_config *blk_cfg;
	uint8_t *buf;
	int res;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");
	__ASSERT(dma_cfg->block_count == 1,
		 "HDA does not support scatter gather or chained "
		 "block transfers.");
	__ASSERT(dma_cfg->channel_direction == cfg->direction,
		 "Unexpected channel direction, HDA host out supports "
		 "HOST_TO_MEMORY");

	blk_cfg = dma_cfg->head_block;
	buf = (uint8_t *)(uintptr_t)(blk_cfg->dest_address);

	res = intel_adsp_hda_set_buffer(cfg->base, cfg->regblock_size, channel, buf,
					blk_cfg->block_size);

	if (res == 0) {
		*DGMBS(cfg->base, cfg->regblock_size, channel) =
			blk_cfg->block_size & HDA_ALIGN_MASK;

		intel_adsp_hda_set_sample_container_size(cfg->base, cfg->regblock_size, channel,
							 dma_cfg->dest_data_size);
	}

	return res;
}

int intel_adsp_hda_dma_link_in_config(const struct device *dev,
				      uint32_t channel,
				      struct dma_config *dma_cfg)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;
	struct dma_block_config *blk_cfg;
	uint8_t *buf;
	int res;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");
	__ASSERT(dma_cfg->block_count == 1,
		 "HDA does not support scatter gather or chained "
		 "block transfers.");
	__ASSERT(dma_cfg->channel_direction == cfg->direction,
		 "Unexpected channel direction, HDA link in supports "
		 "PERIPHERAL_TO_MEMORY");

	blk_cfg = dma_cfg->head_block;
	buf = (uint8_t *)(uintptr_t)(blk_cfg->dest_address);
	res = intel_adsp_hda_set_buffer(cfg->base, cfg->regblock_size, channel, buf,
					blk_cfg->block_size);
	if (res == 0) {
		intel_adsp_hda_set_sample_container_size(cfg->base, cfg->regblock_size, channel,
							 dma_cfg->dest_data_size);
	}

	return res;
}


int intel_adsp_hda_dma_link_out_config(const struct device *dev,
				       uint32_t channel,
				       struct dma_config *dma_cfg)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;
	struct dma_block_config *blk_cfg;
	uint8_t *buf;
	int res;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");
	__ASSERT(dma_cfg->block_count == 1,
		 "HDA does not support scatter gather or chained "
		 "block transfers.");
	__ASSERT(dma_cfg->channel_direction == cfg->direction,
		 "Unexpected channel direction, HDA link out supports "
		 "MEMORY_TO_PERIPHERAL");

	blk_cfg = dma_cfg->head_block;
	buf = (uint8_t *)(uintptr_t)(blk_cfg->source_address);

	res = intel_adsp_hda_set_buffer(cfg->base, cfg->regblock_size, channel, buf,
					blk_cfg->block_size);
	if (res == 0) {
		intel_adsp_hda_set_sample_container_size(cfg->base, cfg->regblock_size, channel,
							 dma_cfg->source_data_size);
	}

	return res;
}


int intel_adsp_hda_dma_link_reload(const struct device *dev, uint32_t channel,
				   uint32_t src, uint32_t dst, size_t size)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");

	intel_adsp_hda_link_commit(cfg->base, cfg->regblock_size, channel, size);

	return 0;
}

int intel_adsp_hda_dma_host_reload(const struct device *dev, uint32_t channel,
				   uint32_t src, uint32_t dst, size_t size)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");

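	/*
	 * When CONFIG_DMA_INTEL_ADSP_HDA_TIMING_L1_EXIT is enabled, force the
	 * DSP interface out of the low power DMI L1 state and arm a buffer
	 * segment interrupt one minimum FPI increment past the current
	 * hardware position. The ISR below uses that interrupt to confirm the
	 * transfer has actually started before allowing L1 entry again.
	 */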
#if CONFIG_DMA_INTEL_ADSP_HDA_TIMING_L1_EXIT
	const size_t buf_size = intel_adsp_hda_get_buffer_size(cfg->base, cfg->regblock_size,
								channel);

	if (!buf_size) {
		return -EIO;
	}

	intel_adsp_force_dmi_l0_state();
	switch (cfg->direction) {
	case HOST_TO_MEMORY:
		; /* Only statements can be labeled in C, a declaration is not valid */
		const uint32_t rp = *DGBRP(cfg->base, cfg->regblock_size, channel);
		const uint32_t next_rp = (rp + INTEL_HDA_MIN_FPI_INCREMENT_FOR_INTERRUPT) %
					 buf_size;

		intel_adsp_hda_set_buffer_segment_ptr(cfg->base, cfg->regblock_size,
						      channel, next_rp);
		intel_adsp_hda_enable_buffer_interrupt(cfg->base, cfg->regblock_size, channel);
		break;
	case MEMORY_TO_HOST:
		;
		const uint32_t wp = *DGBWP(cfg->base, cfg->regblock_size, channel);
		const uint32_t next_wp = (wp + INTEL_HDA_MIN_FPI_INCREMENT_FOR_INTERRUPT) %
					 buf_size;

		intel_adsp_hda_set_buffer_segment_ptr(cfg->base, cfg->regblock_size,
						      channel, next_wp);
		intel_adsp_hda_enable_buffer_interrupt(cfg->base, cfg->regblock_size, channel);
		break;
	default:
		break;
	}
#endif

	intel_adsp_hda_host_commit(cfg->base, cfg->regblock_size, channel, size);

	return 0;
}

int intel_adsp_hda_dma_status(const struct device *dev, uint32_t channel,
			      struct dma_status *stat)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;
	bool xrun_det;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");

	uint32_t unused = intel_adsp_hda_unused(cfg->base, cfg->regblock_size, channel);
	uint32_t used = *DGBS(cfg->base, cfg->regblock_size, channel) - unused;

	stat->dir = cfg->direction;
	stat->busy = *DGCS(cfg->base, cfg->regblock_size, channel) & DGCS_GBUSY;
	stat->write_position = *DGBWP(cfg->base, cfg->regblock_size, channel);
	stat->read_position = *DGBRP(cfg->base, cfg->regblock_size, channel);
	stat->pending_length = used;
	stat->free = unused;

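	/*
	 * Detect transfer errors: an underrun on output (memory to peripheral)
	 * or an overrun on input (peripheral to memory) is cleared here and
	 * reported to the caller as -EPIPE.
	 */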
	switch (cfg->direction) {
	case MEMORY_TO_PERIPHERAL:
		xrun_det = intel_adsp_hda_is_buffer_underrun(cfg->base, cfg->regblock_size,
							     channel);
		if (xrun_det) {
			intel_adsp_hda_underrun_clear(cfg->base, cfg->regblock_size, channel);
			return -EPIPE;
		}
		break;
	case PERIPHERAL_TO_MEMORY:
		xrun_det = intel_adsp_hda_is_buffer_overrun(cfg->base, cfg->regblock_size,
							    channel);
		if (xrun_det) {
			intel_adsp_hda_overrun_clear(cfg->base, cfg->regblock_size, channel);
			return -EPIPE;
		}
		break;
	default:
		break;
	}

	return 0;
}

bool intel_adsp_hda_dma_chan_filter(const struct device *dev, int channel, void *filter_param)
{
	uint32_t requested_channel;

	if (!filter_param) {
		return true;
	}

	requested_channel = *(uint32_t *)filter_param;

	if (channel == requested_channel) {
		return true;
	}

	return false;
}

int intel_adsp_hda_dma_start(const struct device *dev, uint32_t channel)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;
	uint32_t size;
	bool set_fifordy;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");

#if CONFIG_PM_DEVICE_RUNTIME
	bool first_use = false;
	enum pm_device_state state;

	/* If the device is used for the first time, we need to let the power domain know that
	 * we want to use it.
	 */
	if (pm_device_state_get(dev, &state) == 0) {
		first_use = state != PM_DEVICE_STATE_ACTIVE;
		if (first_use) {
			int ret = pm_device_runtime_get(dev);

			if (ret < 0) {
				return ret;
			}
		}
	}
#endif

	if (intel_adsp_hda_is_enabled(cfg->base, cfg->regblock_size, channel)) {
		return 0;
	}

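	/* FIFORDY is only set when enabling host streams; link streams are enabled without it. */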
	set_fifordy = (cfg->direction == HOST_TO_MEMORY || cfg->direction == MEMORY_TO_HOST);
	intel_adsp_hda_enable(cfg->base, cfg->regblock_size, channel, set_fifordy);

	if (cfg->direction == MEMORY_TO_PERIPHERAL) {
		size = intel_adsp_hda_get_buffer_size(cfg->base, cfg->regblock_size, channel);
		intel_adsp_hda_link_commit(cfg->base, cfg->regblock_size, channel, size);
	}

#if CONFIG_PM_DEVICE_RUNTIME
	if (!first_use) {
		return pm_device_runtime_get(dev);
	}
#endif
	return 0;
}

int intel_adsp_hda_dma_stop(const struct device *dev, uint32_t channel)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");

	if (!intel_adsp_hda_is_enabled(cfg->base, cfg->regblock_size, channel)) {
		return 0;
	}

	intel_adsp_hda_disable(cfg->base, cfg->regblock_size, channel);

	if (!WAIT_FOR(!intel_adsp_hda_is_enabled(cfg->base, cfg->regblock_size, channel), 1000,
		      k_busy_wait(1))) {
		return -EBUSY;
	}

	return pm_device_runtime_put(dev);
}

static void intel_adsp_hda_channels_init(const struct device *dev)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;

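	/*
	 * Walk every channel and put it into a known state. If a channel is
	 * found already enabled, it is disabled and its full buffer committed,
	 * presumably to flush any stale state left behind by earlier use.
	 */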
	for (uint32_t i = 0; i < cfg->dma_channels; i++) {
		intel_adsp_hda_init(cfg->base, cfg->regblock_size, i);

		if (intel_adsp_hda_is_enabled(cfg->base, cfg->regblock_size, i)) {
			uint32_t size;

			size = intel_adsp_hda_get_buffer_size(cfg->base, cfg->regblock_size, i);
			intel_adsp_hda_disable(cfg->base, cfg->regblock_size, i);
			intel_adsp_hda_link_commit(cfg->base, cfg->regblock_size, i, size);
		}
	}

#if CONFIG_DMA_INTEL_ADSP_HDA_TIMING_L1_EXIT
	/* Configure interrupts */
	if (cfg->irq_config) {
		cfg->irq_config();
	}
#endif
}

int intel_adsp_hda_dma_init(const struct device *dev)
{
	struct intel_adsp_hda_dma_data *data = dev->data;
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;

	data->ctx.dma_channels = cfg->dma_channels;
	data->ctx.atomic = data->channels_atomic;
	data->ctx.magic = DMA_MAGIC;
#ifdef CONFIG_PM_DEVICE_RUNTIME
	if (pm_device_on_power_domain(dev)) {
		pm_device_init_off(dev);
	} else {
		intel_adsp_hda_channels_init(dev);
		pm_device_init_suspended(dev);
	}

	return pm_device_runtime_enable(dev);
#else
	intel_adsp_hda_channels_init(dev);
	return 0;
#endif
}

int intel_adsp_hda_dma_get_attribute(const struct device *dev, uint32_t type, uint32_t *value)
{
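	/*
	 * Alignment attributes are read from any enabled intel_adsp_hda_link_out
	 * node; the assumption is that all HDA stream variants share the same
	 * alignment requirements.
	 */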
	switch (type) {
	case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT:
		*value = DMA_BUF_ADDR_ALIGNMENT(
			DT_COMPAT_GET_ANY_STATUS_OKAY(intel_adsp_hda_link_out));
		break;
	case DMA_ATTR_BUFFER_SIZE_ALIGNMENT:
		*value = DMA_BUF_SIZE_ALIGNMENT(
			DT_COMPAT_GET_ANY_STATUS_OKAY(intel_adsp_hda_link_out));
		break;
	case DMA_ATTR_COPY_ALIGNMENT:
		*value = DMA_COPY_ALIGNMENT(DT_COMPAT_GET_ANY_STATUS_OKAY(intel_adsp_hda_link_out));
		break;
	case DMA_ATTR_MAX_BLOCK_COUNT:
		*value = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

#ifdef CONFIG_PM_DEVICE
int intel_adsp_hda_dma_pm_action(const struct device *dev, enum pm_device_action action)
{
	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		intel_adsp_hda_channels_init(dev);
		break;
	case PM_DEVICE_ACTION_SUSPEND:
	case PM_DEVICE_ACTION_TURN_ON:
	case PM_DEVICE_ACTION_TURN_OFF:
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}
#endif

#define DEVICE_DT_GET_AND_COMMA(node_id) DEVICE_DT_GET(node_id),

void intel_adsp_hda_dma_isr(void)
{
#if CONFIG_DMA_INTEL_ADSP_HDA_TIMING_L1_EXIT
	struct dma_context *dma_ctx;
	const struct intel_adsp_hda_dma_cfg *cfg;
	bool triggered_interrupts = false;
	int i, j;
	int expected_interrupts = 0;
	const struct device *host_dev[] = {
#if CONFIG_DMA_INTEL_ADSP_HDA_HOST_OUT
		DT_FOREACH_STATUS_OKAY(intel_adsp_hda_host_out, DEVICE_DT_GET_AND_COMMA)
#endif
#if CONFIG_DMA_INTEL_ADSP_HDA_HOST_IN
		DT_FOREACH_STATUS_OKAY(intel_adsp_hda_host_in, DEVICE_DT_GET_AND_COMMA)
#endif
	};

	/*
	 * To initiate transfer, DSP must be in L0 state. Once the transfer is started, DSP can go
	 * to the low power L1 state, and the transfer will be able to continue and finish in L1
	 * state. Interrupts are configured to trigger after the first 32 bytes of data arrive.
	 * Once such an interrupt arrives, the transfer has already started. If all expected
	 * transfers have started, it is safe to allow the low power L1 state.
	 */

	for (i = 0; i < ARRAY_SIZE(host_dev); i++) {
		dma_ctx = (struct dma_context *)host_dev[i]->data;
		cfg = host_dev[i]->config;

		for (j = 0; j < dma_ctx->dma_channels; j++) {
			if (!atomic_test_bit(dma_ctx->atomic, j))
				continue;

			if (!intel_adsp_hda_is_buffer_interrupt_enabled(cfg->base,
									cfg->regblock_size, j))
				continue;

			if (intel_adsp_hda_check_buffer_interrupt(cfg->base,
								  cfg->regblock_size, j)) {
				triggered_interrupts = true;
				intel_adsp_hda_disable_buffer_interrupt(cfg->base,
									cfg->regblock_size, j);
				intel_adsp_hda_clear_buffer_interrupt(cfg->base,
								      cfg->regblock_size, j);
			} else {
				expected_interrupts++;
			}
		}
	}

	/*
	 * Allow entering low power L1 state only after all enabled interrupts arrived, i.e.,
	 * transfers started on all channels.
	 */
	if (triggered_interrupts && expected_interrupts == 0) {
		intel_adsp_allow_dmi_l1_state();
	}
#endif
}