/*
 * Copyright (c) 2022 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @brief Intel ADSP HDA DMA (Stream) driver
 *
 * From the DSP's point of view, an HDA stream is effectively a ring buffer
 * (FIFO) whose read and write positions are maintained by the hardware;
 * software commits a read or write by writing its length to another
 * register (DGBFPI).
 *
 * It's important that the software knows the position in the ring buffer to
 * read or write from. It's also important that the buffer be placed in the
 * correct memory region and aligned to 128 bytes. Lastly, it's important that
 * the host and DSP coordinate the order in which operations take place. With
 * all of that done, HDA streams are a fantastic bit of hardware and do their
 * job well.
 *
 * There are 4 types of streams, with a set of each available to be used to
 * communicate to or from the Host or the Link. Each stream set is
 * unidirectional. An illustrative usage sketch follows below.
 */
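
/*
 * Illustrative only: a minimal sketch of how a client might configure and
 * start a host-in (MEMORY_TO_HOST) stream through the generic Zephyr DMA API.
 * The node label, channel number, buffer size and container size below are
 * assumptions made for this example, not requirements of the driver.
 *
 *	static __aligned(128) uint8_t hda_buf[512];	// hypothetical buffer, placed per SoC memory rules
 *
 *	const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(hda_host_in));	// hypothetical node label
 *	struct dma_block_config blk = {
 *		.source_address = (uintptr_t)hda_buf,
 *		.block_size = sizeof(hda_buf),
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_HOST,
 *		.source_data_size = 4,		// sample container size in bytes (assumed)
 *		.block_count = 1,		// HDA supports exactly one block
 *		.head_block = &blk,
 *	};
 *	int ret = dma_config(dma, channel, &cfg);
 *
 *	if (ret == 0) {
 *		ret = dma_start(dma, channel);
 *	}
 */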

#include <zephyr/drivers/dma.h>

#include "dma_intel_adsp_hda.h"
#include <intel_adsp_hda.h>

int intel_adsp_hda_dma_host_in_config(const struct device *dev,
				       uint32_t channel,
				       struct dma_config *dma_cfg)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;
	struct dma_block_config *blk_cfg;
	uint8_t *buf;
	int res;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");
	__ASSERT(dma_cfg->block_count == 1,
		 "HDA does not support scatter gather or chained "
		 "block transfers.");
	__ASSERT(dma_cfg->channel_direction == cfg->direction,
		 "Unexpected channel direction, HDA host in supports "
		 "MEMORY_TO_HOST");

	blk_cfg = dma_cfg->head_block;
	buf = (uint8_t *)(uintptr_t)(blk_cfg->source_address);
	res = intel_adsp_hda_set_buffer(cfg->base, cfg->regblock_size, channel, buf,
					blk_cfg->block_size);

	if (res == 0) {
		*DGMBS(cfg->base, cfg->regblock_size, channel) =
			blk_cfg->block_size & HDA_ALIGN_MASK;

		intel_adsp_hda_set_sample_container_size(cfg->base, cfg->regblock_size, channel,
							 dma_cfg->source_data_size);
	}

	return res;
}


int intel_adsp_hda_dma_host_out_config(const struct device *dev,
					uint32_t channel,
					struct dma_config *dma_cfg)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;
	struct dma_block_config *blk_cfg;
	uint8_t *buf;
	int res;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");
	__ASSERT(dma_cfg->block_count == 1,
		 "HDA does not support scatter gather or chained "
		 "block transfers.");
	__ASSERT(dma_cfg->channel_direction == cfg->direction,
		 "Unexpected channel direction, HDA host out supports "
		 "HOST_TO_MEMORY");

	blk_cfg = dma_cfg->head_block;
	buf = (uint8_t *)(uintptr_t)(blk_cfg->dest_address);

	res = intel_adsp_hda_set_buffer(cfg->base, cfg->regblock_size, channel, buf,
					blk_cfg->block_size);

	if (res == 0) {
		*DGMBS(cfg->base, cfg->regblock_size, channel) =
			blk_cfg->block_size & HDA_ALIGN_MASK;

		intel_adsp_hda_set_sample_container_size(cfg->base, cfg->regblock_size, channel,
							 dma_cfg->dest_data_size);
	}

	return res;
}

int intel_adsp_hda_dma_link_in_config(const struct device *dev,
				       uint32_t channel,
				       struct dma_config *dma_cfg)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;
	struct dma_block_config *blk_cfg;
	uint8_t *buf;
	int res;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");
	__ASSERT(dma_cfg->block_count == 1,
		 "HDA does not support scatter gather or chained "
		 "block transfers.");
	__ASSERT(dma_cfg->channel_direction == cfg->direction,
		 "Unexpected channel direction, HDA link in supports "
		 "PERIPHERAL_TO_MEMORY");

	blk_cfg = dma_cfg->head_block;
	buf = (uint8_t *)(uintptr_t)(blk_cfg->dest_address);
	res = intel_adsp_hda_set_buffer(cfg->base, cfg->regblock_size, channel, buf,
					blk_cfg->block_size);
	if (res == 0) {
		intel_adsp_hda_set_sample_container_size(cfg->base, cfg->regblock_size, channel,
							 dma_cfg->dest_data_size);
	}

	return res;
}


int intel_adsp_hda_dma_link_out_config(const struct device *dev,
					uint32_t channel,
					struct dma_config *dma_cfg)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;
	struct dma_block_config *blk_cfg;
	uint8_t *buf;
	int res;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");
	__ASSERT(dma_cfg->block_count == 1,
		 "HDA does not support scatter gather or chained "
		 "block transfers.");
	__ASSERT(dma_cfg->channel_direction == cfg->direction,
		 "Unexpected channel direction, HDA link out supports "
		 "MEMORY_TO_PERIPHERAL");

	blk_cfg = dma_cfg->head_block;
	buf = (uint8_t *)(uintptr_t)(blk_cfg->source_address);

	res = intel_adsp_hda_set_buffer(cfg->base, cfg->regblock_size, channel, buf,
					blk_cfg->block_size);
	if (res == 0) {
		intel_adsp_hda_set_sample_container_size(cfg->base, cfg->regblock_size, channel,
							 dma_cfg->source_data_size);
	}

	return res;
}


int intel_adsp_hda_dma_link_reload(const struct device *dev, uint32_t channel,
				   uint32_t src, uint32_t dst, size_t size)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");

	intel_adsp_hda_link_commit(cfg->base, cfg->regblock_size, channel, size);

	return 0;
}

int intel_adsp_hda_dma_host_reload(const struct device *dev, uint32_t channel,
				   uint32_t src, uint32_t dst, size_t size)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");

#if CONFIG_DMA_INTEL_ADSP_HDA_TIMING_L1_EXIT
	const size_t buf_size = intel_adsp_hda_get_buffer_size(cfg->base, cfg->regblock_size,
							       channel);

	if (!buf_size) {
		return -EIO;
	}

	intel_adsp_force_dmi_l0_state();
	switch (cfg->direction) {
	case HOST_TO_MEMORY:
		; /* Only statements can be labeled in C, a declaration is not valid */
		const uint32_t rp = *DGBRP(cfg->base, cfg->regblock_size, channel);
		const uint32_t next_rp = (rp + INTEL_HDA_MIN_FPI_INCREMENT_FOR_INTERRUPT) %
			buf_size;

		intel_adsp_hda_set_buffer_segment_ptr(cfg->base, cfg->regblock_size,
						      channel, next_rp);
		intel_adsp_hda_enable_buffer_interrupt(cfg->base, cfg->regblock_size, channel);
		break;
	case MEMORY_TO_HOST:
		;
		const uint32_t wp = *DGBWP(cfg->base, cfg->regblock_size, channel);
		const uint32_t next_wp = (wp + INTEL_HDA_MIN_FPI_INCREMENT_FOR_INTERRUPT) %
			buf_size;

		intel_adsp_hda_set_buffer_segment_ptr(cfg->base, cfg->regblock_size,
						      channel, next_wp);
		intel_adsp_hda_enable_buffer_interrupt(cfg->base, cfg->regblock_size, channel);
		break;
	default:
		break;
	}
#endif

	intel_adsp_hda_host_commit(cfg->base, cfg->regblock_size, channel, size);

	return 0;
}
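
/*
 * Illustrative only: after the client has produced (or consumed) data in the
 * buffer, it commits the number of bytes through dma_reload(). This driver
 * does not use the src and dst arguments and only forwards the size to the
 * firmware pointer increment; "bytes_done" is a hypothetical count.
 *
 *	ret = dma_reload(dma, channel, 0, 0, bytes_done);
 */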

int intel_adsp_hda_dma_status(const struct device *dev, uint32_t channel,
			      struct dma_status *stat)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;
	uint32_t llp_l = 0;
	uint32_t llp_u = 0;
	bool xrun_det;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");

	uint32_t unused = intel_adsp_hda_unused(cfg->base, cfg->regblock_size, channel);
	uint32_t used = *DGBS(cfg->base, cfg->regblock_size, channel) - unused;

	stat->dir = cfg->direction;
	stat->busy = *DGCS(cfg->base, cfg->regblock_size, channel) & DGCS_GBUSY;
	stat->write_position = *DGBWP(cfg->base, cfg->regblock_size, channel);
	stat->read_position = *DGBRP(cfg->base, cfg->regblock_size, channel);
	stat->pending_length = used;
	stat->free = unused;

#if CONFIG_SOC_INTEL_ACE20_LNL || CONFIG_SOC_INTEL_ACE30
	/* Linear Link Position via HDA-DMA is only supported on ACE2 or newer */
	if (cfg->direction == MEMORY_TO_PERIPHERAL || cfg->direction == PERIPHERAL_TO_MEMORY) {
		uint32_t tmp;

		tmp = *DGLLLPL(cfg->base, cfg->regblock_size, channel);
		llp_u = *DGLLLPU(cfg->base, cfg->regblock_size, channel);
		llp_l = *DGLLLPL(cfg->base, cfg->regblock_size, channel);
		if (tmp > llp_l) {
			/* re-read the LLPU value, as LLPL just wrapped */
			llp_u = *DGLLLPU(cfg->base, cfg->regblock_size, channel);
		}
	}
#endif
	stat->total_copied = ((uint64_t)llp_u << 32) | llp_l;

	switch (cfg->direction) {
	case MEMORY_TO_PERIPHERAL:
		xrun_det = intel_adsp_hda_is_buffer_underrun(cfg->base, cfg->regblock_size,
							     channel);
		if (xrun_det) {
			intel_adsp_hda_underrun_clear(cfg->base, cfg->regblock_size, channel);
			return -EPIPE;
		}
		break;
	case PERIPHERAL_TO_MEMORY:
		xrun_det = intel_adsp_hda_is_buffer_overrun(cfg->base, cfg->regblock_size,
							    channel);
		if (xrun_det) {
			intel_adsp_hda_overrun_clear(cfg->base, cfg->regblock_size, channel);
			return -EPIPE;
		}
		break;
	default:
		break;
	}

	return 0;
}
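
/*
 * Illustrative only: a sketch of polling transfer progress with
 * dma_get_status(). A -EPIPE return means an underrun/overrun was detected
 * (and already cleared by this driver); recovery is left to the caller.
 *
 *	struct dma_status stat;
 *
 *	ret = dma_get_status(dma, channel, &stat);
 *	if (ret == -EPIPE) {
 *		// xrun detected and cleared, caller decides how to recover
 *	} else if (ret == 0) {
 *		// stat.pending_length bytes are in use, stat.free bytes are available
 *	}
 */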

bool intel_adsp_hda_dma_chan_filter(const struct device *dev, int channel, void *filter_param)
{
	uint32_t requested_channel;

	if (!filter_param) {
		return true;
	}

	requested_channel = *(uint32_t *)filter_param;

	if (channel == requested_channel) {
		return true;
	}

	return false;
}

int intel_adsp_hda_dma_start(const struct device *dev, uint32_t channel)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;
	uint32_t size;
	bool set_fifordy;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");

#if CONFIG_PM_DEVICE_RUNTIME
	bool first_use = false;
	enum pm_device_state state;

	/* If the device is used for the first time, we need to let the power domain know that
	 * we want to use it.
	 */
	if (pm_device_state_get(dev, &state) == 0) {
		first_use = state != PM_DEVICE_STATE_ACTIVE;
		if (first_use) {
			int ret = pm_device_runtime_get(dev);

			if (ret < 0) {
				return ret;
			}
		}
	}
#endif

	if (intel_adsp_hda_is_enabled(cfg->base, cfg->regblock_size, channel)) {
		return 0;
	}

	set_fifordy = (cfg->direction == HOST_TO_MEMORY || cfg->direction == MEMORY_TO_HOST);
	intel_adsp_hda_enable(cfg->base, cfg->regblock_size, channel, set_fifordy);

	if (cfg->direction == MEMORY_TO_PERIPHERAL) {
		size = intel_adsp_hda_get_buffer_size(cfg->base, cfg->regblock_size, channel);
		intel_adsp_hda_link_commit(cfg->base, cfg->regblock_size, channel, size);
	}

#if CONFIG_PM_DEVICE_RUNTIME
	if (!first_use) {
		return pm_device_runtime_get(dev);
	}
#endif
	return 0;
}

int intel_adsp_hda_dma_stop(const struct device *dev, uint32_t channel)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;

	__ASSERT(channel < cfg->dma_channels, "Channel does not exist");

	if (!intel_adsp_hda_is_enabled(cfg->base, cfg->regblock_size, channel)) {
		return 0;
	}

	intel_adsp_hda_disable(cfg->base, cfg->regblock_size, channel);

	if (!WAIT_FOR(!intel_adsp_hda_is_enabled(cfg->base, cfg->regblock_size, channel), 1000,
		      k_busy_wait(1))) {
		return -EBUSY;
	}

	return pm_device_runtime_put(dev);
}

static void intel_adsp_hda_channels_init(const struct device *dev)
{
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;

	for (uint32_t i = 0; i < cfg->dma_channels; i++) {
		intel_adsp_hda_init(cfg->base, cfg->regblock_size, i);

		if (intel_adsp_hda_is_enabled(cfg->base, cfg->regblock_size, i)) {
			uint32_t size;

			size = intel_adsp_hda_get_buffer_size(cfg->base, cfg->regblock_size, i);
			intel_adsp_hda_disable(cfg->base, cfg->regblock_size, i);
			intel_adsp_hda_link_commit(cfg->base, cfg->regblock_size, i, size);
		}
	}

#if CONFIG_DMA_INTEL_ADSP_HDA_TIMING_L1_EXIT
	/* Configure interrupts */
	if (cfg->irq_config) {
		cfg->irq_config();
	}
#endif
}

int intel_adsp_hda_dma_pm_action(const struct device *dev, enum pm_device_action action)
{
	ARG_UNUSED(dev);
	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
	case PM_DEVICE_ACTION_SUSPEND:
	case PM_DEVICE_ACTION_TURN_ON:
	case PM_DEVICE_ACTION_TURN_OFF:
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}

int intel_adsp_hda_dma_init(const struct device *dev)
{
	struct intel_adsp_hda_dma_data *data = dev->data;
	const struct intel_adsp_hda_dma_cfg *const cfg = dev->config;

	data->ctx.dma_channels = cfg->dma_channels;
	data->ctx.atomic = data->channels_atomic;
	data->ctx.magic = DMA_MAGIC;
	intel_adsp_hda_channels_init(dev);
	return pm_device_driver_init(dev, intel_adsp_hda_dma_pm_action);
}

int intel_adsp_hda_dma_get_attribute(const struct device *dev, uint32_t type, uint32_t *value)
{
	switch (type) {
	case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT:
		*value = DMA_BUF_ADDR_ALIGNMENT(
				DT_COMPAT_GET_ANY_STATUS_OKAY(intel_adsp_hda_link_out));
		break;
	case DMA_ATTR_BUFFER_SIZE_ALIGNMENT:
		*value = DMA_BUF_SIZE_ALIGNMENT(
				DT_COMPAT_GET_ANY_STATUS_OKAY(intel_adsp_hda_link_out));
		break;
	case DMA_ATTR_COPY_ALIGNMENT:
		*value = DMA_COPY_ALIGNMENT(DT_COMPAT_GET_ANY_STATUS_OKAY(intel_adsp_hda_link_out));
		break;
	case DMA_ATTR_MAX_BLOCK_COUNT:
		*value = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

#define DEVICE_DT_GET_AND_COMMA(node_id) DEVICE_DT_GET(node_id),

void intel_adsp_hda_dma_isr(void)
{
#if CONFIG_DMA_INTEL_ADSP_HDA_TIMING_L1_EXIT
	struct dma_context *dma_ctx;
	const struct intel_adsp_hda_dma_cfg *cfg;
	bool triggered_interrupts = false;
	int i, j;
	int expected_interrupts = 0;
	atomic_val_t enabled_chs;
	const struct device *host_dev[] = {
#if CONFIG_DMA_INTEL_ADSP_HDA_HOST_OUT
		DT_FOREACH_STATUS_OKAY(intel_adsp_hda_host_out, DEVICE_DT_GET_AND_COMMA)
#endif
#if CONFIG_DMA_INTEL_ADSP_HDA_HOST_IN
		DT_FOREACH_STATUS_OKAY(intel_adsp_hda_host_in, DEVICE_DT_GET_AND_COMMA)
#endif
	};

	/*
	 * To initiate transfer, DSP must be in L0 state. Once the transfer is started, DSP can go
	 * to the low power L1 state, and the transfer will be able to continue and finish in L1
	 * state. Interrupts are configured to trigger after the first 32 bytes of data arrive.
	 * Once such an interrupt arrives, the transfer has already started. If all expected
	 * transfers have started, it is safe to allow the low power L1 state.
	 */

	for (i = 0; i < ARRAY_SIZE(host_dev); i++) {
		dma_ctx = (struct dma_context *)host_dev[i]->data;
		cfg = host_dev[i]->config;
		enabled_chs = atomic_get(dma_ctx->atomic);
		for (j = 0; enabled_chs && j < dma_ctx->dma_channels; j++) {
			if (!(enabled_chs & BIT(j))) {
				continue;
			}
			enabled_chs &= ~(BIT(j));

			if (!intel_adsp_hda_is_buffer_interrupt_enabled(cfg->base,
									cfg->regblock_size, j)) {
				continue;
			}

			if (intel_adsp_hda_check_buffer_interrupt(cfg->base,
								  cfg->regblock_size, j)) {
				triggered_interrupts = true;
				intel_adsp_hda_disable_buffer_interrupt(cfg->base,
									cfg->regblock_size, j);
				intel_adsp_hda_clear_buffer_interrupt(cfg->base,
								      cfg->regblock_size, j);
			} else {
				expected_interrupts++;
			}
		}
	}

	/*
	 * Allow entering low power L1 state only after all enabled interrupts arrived, i.e.,
	 * transfers started on all channels.
	 */
	if (triggered_interrupts && expected_interrupts == 0) {
		intel_adsp_allow_dmi_l1_state();
	}
#endif
}