/*
 * Copyright (c) 2022 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <adsp_interrupt.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/cache.h>

#define DT_DRV_COMPAT intel_adsp_gpdma

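/* GPDMA shim register map used by this driver: a control register (CTL) at
 * offset 0x0004 plus, per channel at a 0x10 stride, a linear link position
 * control register (CHLLPC) and a 64-bit position counter split across
 * CHLLPL/CHLLPU.
 */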
#define GPDMA_CTL_OFFSET 0x0004
#define GPDMA_CTL_FDCGB BIT(0)
#define GPDMA_CTL_DCGD BIT(30)

/* TODO make device tree defined? */
#define GPDMA_CHLLPC_OFFSET(channel) (0x0010 + channel*0x10)
#define GPDMA_CHLLPC_EN BIT(7)
#define GPDMA_CHLLPC_DHRS(x) SET_BITS(6, 0, x)

/* TODO make device tree defined? */
#define GPDMA_CHLLPL(channel) (0x0018 + channel*0x10)
#define GPDMA_CHLLPU(channel) (0x001c + channel*0x10)

#define GPDMA_OSEL(x) SET_BITS(25, 24, x)
#define SHIM_CLKCTL_LPGPDMA_SPA	BIT(0)
#define SHIM_CLKCTL_LPGPDMA_CPA	BIT(8)

# define DSP_INIT_LPGPDMA(x)	  (0x71A60 + (2*x))
# define LPGPDMA_CTLOSEL_FLAG	  BIT(15)
# define LPGPDMA_CHOSEL_FLAG	  0xFF

#include "dma_dw_common.h"
#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>

#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(dma_intel_adsp_gpdma);


/* Device run time data */
struct intel_adsp_gpdma_data {
	struct dw_dma_dev_data dw_data;
};

/* Device constant configuration parameters */
struct intel_adsp_gpdma_cfg {
	struct dw_dma_dev_cfg dw_cfg;
	uint32_t shim;
};

#ifdef DMA_INTEL_ADSP_GPDMA_DEBUG
static void intel_adsp_gpdma_dump_registers(const struct device *dev, uint32_t channel)
{
	const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config;
	const struct dw_dma_dev_cfg *const dw_cfg = &dev_cfg->dw_cfg;
	uint32_t cap, ctl, ipptr, llpc, llpl, llpu;
	int i;

	/* Shims */
	cap = dw_read(dev_cfg->shim, 0x0);
	ctl = dw_read(dev_cfg->shim, 0x4);
	ipptr = dw_read(dev_cfg->shim, 0x8);
	llpc = dw_read(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel));
	llpl = dw_read(dev_cfg->shim, GPDMA_CHLLPL(channel));
	llpu = dw_read(dev_cfg->shim, GPDMA_CHLLPU(channel));

	LOG_INF("%s: channel: %d cap %x, ctl %x, ipptr %x, llpc %x, llpl %x, llpu %x", dev->name,
		channel, cap, ctl, ipptr, llpc, llpl, llpu);

	/* Channel Register Dump */
	for (i = 0; i <= DW_DMA_CHANNEL_REGISTER_OFFSET_END; i += 0x8) {
		LOG_INF(" channel register offset: %#x value: %#x\n", chan_reg_offs[i],
			dw_read(dw_cfg->base, DW_CHAN_OFFSET(channel) + chan_reg_offs[i]));
	}

	/* IP Register Dump */
	for (i = DW_DMA_CHANNEL_REGISTER_OFFSET_START; i <= DW_DMA_CHANNEL_REGISTER_OFFSET_END;
	     i += 0x8) {
		LOG_INF(" ip register offset: %#x value: %#x\n", ip_reg_offs[i],
			dw_read(dw_cfg->base, ip_reg_offs[i]));
	}
}
#endif

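/*
 * LLP (linear link position) helpers. When CONFIG_DMA_INTEL_ADSP_GPDMA_HAS_LLP
 * is enabled, the shim exposes a per-channel CHLLPC control register (handshake
 * slot select plus an enable bit) and a 64-bit position counter split across
 * CHLLPL/CHLLPU, which get_status() below reports as total_copied.
 */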
static void intel_adsp_gpdma_llp_config(const struct device *dev,
					uint32_t channel, uint32_t dma_slot)
{
#ifdef CONFIG_DMA_INTEL_ADSP_GPDMA_HAS_LLP
	const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config;

	dw_write(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel),
		 GPDMA_CHLLPC_DHRS(dma_slot));
#endif
}

static inline void intel_adsp_gpdma_llp_enable(const struct device *dev,
					       uint32_t channel)
{
#ifdef CONFIG_DMA_INTEL_ADSP_GPDMA_HAS_LLP
	const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config;
	uint32_t val;

	val = dw_read(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel));
	if (!(val & GPDMA_CHLLPC_EN)) {
		dw_write(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel),
			 val | GPDMA_CHLLPC_EN);
	}
#endif
}

static inline void intel_adsp_gpdma_llp_disable(const struct device *dev,
						uint32_t channel)
{
#ifdef CONFIG_DMA_INTEL_ADSP_GPDMA_HAS_LLP
	const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config;
	uint32_t val;

	/* Clear the enable bit to stop LLP tracking for this channel */
	val = dw_read(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel));
	dw_write(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel),
		 val & ~GPDMA_CHLLPC_EN);
#endif
}

static inline void intel_adsp_gpdma_llp_read(const struct device *dev,
					uint32_t channel, uint32_t *llp_l,
					uint32_t *llp_u)
{
#ifdef CONFIG_DMA_INTEL_ADSP_GPDMA_HAS_LLP
	const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config;
	uint32_t tmp;

	tmp = dw_read(dev_cfg->shim, GPDMA_CHLLPL(channel));
	*llp_u = dw_read(dev_cfg->shim, GPDMA_CHLLPU(channel));
	*llp_l = dw_read(dev_cfg->shim, GPDMA_CHLLPL(channel));
	if (tmp > *llp_l) {
		/* re-read the LLPU value, as LLPL just wrapped */
		*llp_u = dw_read(dev_cfg->shim, GPDMA_CHLLPU(channel));
	}
#endif
}

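/*
 * dma_config() wrapper: run the common DesignWare configuration first, then,
 * for peripheral transfers, program the LLP control register with the
 * handshake (DHRS) slot taken from cfg->dma_slot.
 */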
static int intel_adsp_gpdma_config(const struct device *dev, uint32_t channel,
				struct dma_config *cfg)
{
	int res = dw_dma_config(dev, channel, cfg);

	if (res != 0) {
		return res;
	}

	/* Assume all scatter/gathers are for the same device? */
	switch (cfg->channel_direction) {
	case MEMORY_TO_PERIPHERAL:
	case PERIPHERAL_TO_MEMORY:
		LOG_DBG("%s: channel %d configuring llp for %x", dev->name, channel, cfg->dma_slot);
		intel_adsp_gpdma_llp_config(dev, channel, cfg->dma_slot);
		break;
	default:
		break;
	}

	return res;
}

static int intel_adsp_gpdma_start(const struct device *dev, uint32_t channel)
{
	int ret = 0;
#if CONFIG_PM_DEVICE && CONFIG_SOC_SERIES_INTEL_ADSP_ACE
	bool first_use = false;
	enum pm_device_state state;

	/* The device must be powered up before it is used. For a GPDMA, check
	 * whether this instance is already active and, if it is not, let the
	 * power manager know that we want to use it.
	 */
	if (pm_device_state_get(dev, &state) != -ENOSYS) {
		first_use = state != PM_DEVICE_STATE_ACTIVE;
		if (first_use) {
			ret = pm_device_runtime_get(dev);
			if (ret < 0) {
				return ret;
			}
		}
	}
#endif

	intel_adsp_gpdma_llp_enable(dev, channel);
	ret = dw_dma_start(dev, channel);
	if (ret != 0) {
		intel_adsp_gpdma_llp_disable(dev, channel);
	}

#if CONFIG_PM_DEVICE && CONFIG_SOC_SERIES_INTEL_ADSP_ACE
	/* Device usage is counted by the calls to dw_dma_start and dw_dma_stop.
	 * For the first use, make sure the pm_device_runtime_get and
	 * pm_device_runtime_put calls are balanced.
	 */
	if (first_use) {
		ret = pm_device_runtime_put(dev);
	}
#endif

	return ret;
}

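/*
 * dma_stop() wrapper: stop the DesignWare channel and, on success, turn off
 * LLP tracking for it.
 */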
static int intel_adsp_gpdma_stop(const struct device *dev, uint32_t channel)
{
	int ret = dw_dma_stop(dev, channel);

	if (ret == 0) {
		intel_adsp_gpdma_llp_disable(dev, channel);
	}

	return ret;
}

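/*
 * dma_reload() callback: clear the DONE bits on the channel's LLIs and advance
 * the software position pointer by the reported size, wrapping it at the end
 * of the buffer. No data is copied here.
 */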
static int intel_adsp_gpdma_copy(const struct device *dev, uint32_t channel,
		    uint32_t src, uint32_t dst, size_t size)
{
	struct dw_dma_dev_data *const dev_data = dev->data;
	struct dw_dma_chan_data *chan_data;

	if (channel >= DW_MAX_CHAN) {
		return -EINVAL;
	}

	chan_data = &dev_data->chan[channel];

	/* The default action is to clear the DONE bit for all LLIs, making
	 * sure the cache is coherent between the DSP and the DMAC.
	 */
	for (int i = 0; i < chan_data->lli_count; i++) {
		chan_data->lli[i].ctrl_hi &= ~DW_CTLH_DONE(1);
	}

	chan_data->ptr_data.current_ptr += size;
	if (chan_data->ptr_data.current_ptr >= chan_data->ptr_data.end_ptr) {
		chan_data->ptr_data.current_ptr = chan_data->ptr_data.start_ptr +
			(chan_data->ptr_data.current_ptr - chan_data->ptr_data.end_ptr);
	}

	return 0;
}


/* Disables automatic clock gating (force disable clock gate) */
static void intel_adsp_gpdma_clock_enable(const struct device *dev)
{
	const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config;
	uint32_t reg = dev_cfg->shim + GPDMA_CTL_OFFSET;
	uint32_t val;

	if (IS_ENABLED(CONFIG_SOC_SERIES_INTEL_ADSP_ACE)) {
		val = sys_read32(reg) | GPDMA_CTL_DCGD;
	} else {
		val = GPDMA_CTL_FDCGB;
	}

	sys_write32(val, reg);
}

#ifdef CONFIG_PM_DEVICE
static void intel_adsp_gpdma_clock_disable(const struct device *dev)
{
#ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
	const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config;
	uint32_t reg = dev_cfg->shim + GPDMA_CTL_OFFSET;
	uint32_t val = sys_read32(reg) & ~GPDMA_CTL_DCGD;

	sys_write32(val, reg);
#endif
}
#endif

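/*
 * Hand ownership of the GPDMA controller to the DSP. On ACE this is done via
 * the owner-select (OSEL) field of the shim CTL register; on earlier cAVS
 * platforms via the DSP_INIT_LPGPDMA registers of both controllers.
 */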
static void intel_adsp_gpdma_claim_ownership(const struct device *dev)
{
#ifdef CONFIG_DMA_INTEL_ADSP_GPDMA_NEED_CONTROLLER_OWNERSHIP
#ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
	const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config;
	uint32_t reg = dev_cfg->shim + GPDMA_CTL_OFFSET;
	uint32_t val = sys_read32(reg) | GPDMA_OSEL(0x3);

	sys_write32(val, reg);
#else
	sys_write32(LPGPDMA_CHOSEL_FLAG | LPGPDMA_CTLOSEL_FLAG, DSP_INIT_LPGPDMA(0));
	sys_write32(LPGPDMA_CHOSEL_FLAG | LPGPDMA_CTLOSEL_FLAG, DSP_INIT_LPGPDMA(1));
	ARG_UNUSED(dev);
#endif /* CONFIG_SOC_SERIES_INTEL_ADSP_ACE */
#endif /* CONFIG_DMA_INTEL_ADSP_GPDMA_NEED_CONTROLLER_OWNERSHIP */
}

#ifdef CONFIG_PM_DEVICE
static void intel_adsp_gpdma_release_ownership(const struct device *dev)
{
#ifdef CONFIG_DMA_INTEL_ADSP_GPDMA_NEED_CONTROLLER_OWNERSHIP
#ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
	const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config;
	uint32_t reg = dev_cfg->shim + GPDMA_CTL_OFFSET;
	uint32_t val = sys_read32(reg) & ~GPDMA_OSEL(0x3);

	sys_write32(val, reg);
	/* CHECKME: Do cAVS platforms take ownership of the DMA controller?
	 * If yes, add support for releasing it here.
	 */
#endif /* CONFIG_SOC_SERIES_INTEL_ADSP_ACE */
#endif /* CONFIG_DMA_INTEL_ADSP_GPDMA_NEED_CONTROLLER_OWNERSHIP */
}
#endif

#ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
static int intel_adsp_gpdma_enable(const struct device *dev)
{
	const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config;
	uint32_t reg = dev_cfg->shim + GPDMA_CTL_OFFSET;

	sys_write32(SHIM_CLKCTL_LPGPDMA_SPA, reg);

	if (!WAIT_FOR((sys_read32(reg) & SHIM_CLKCTL_LPGPDMA_CPA), 10000,
		      k_busy_wait(1))) {
		return -1;
	}

	return 0;
}

#ifdef CONFIG_PM_DEVICE
static int intel_adsp_gpdma_disable(const struct device *dev)
{
	const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config;
	uint32_t reg = dev_cfg->shim + GPDMA_CTL_OFFSET;

	sys_write32(sys_read32(reg) & ~SHIM_CLKCTL_LPGPDMA_SPA, reg);
	return 0;
}
#endif /* CONFIG_PM_DEVICE */
#endif /* CONFIG_SOC_SERIES_INTEL_ADSP_ACE */

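/*
 * Power-on sequence used on PM resume and at init: power up the controller
 * (ACE only), give ownership to the DSP, disable dynamic clock gating, run
 * the common DesignWare setup and finally hook up the interrupt.
 */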
static int intel_adsp_gpdma_power_on(const struct device *dev)
{
	const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config;
	int ret;

#ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
	/* Power up */
	ret = intel_adsp_gpdma_enable(dev);

	if (ret != 0) {
		LOG_ERR("%s: failed to initialize", dev->name);
		goto out;
	}
#endif

	/* DW DMA Owner Select to DSP */
	intel_adsp_gpdma_claim_ownership(dev);

	/* Disable dynamic clock gating appropriately before initializing */
	intel_adsp_gpdma_clock_enable(dev);

	/* Disable all channels and channel interrupts */
	ret = dw_dma_setup(dev);
	if (ret != 0) {
		LOG_ERR("%s: failed to initialize", dev->name);
		goto out;
	}

	/* Configure interrupts */
	dev_cfg->dw_cfg.irq_config();

	LOG_INF("%s: initialized", dev->name);

out:
	return ret;
}

#ifdef CONFIG_PM_DEVICE
static int intel_adsp_gpdma_power_off(const struct device *dev)
{
	LOG_INF("%s: power off", dev->name);
	/* Re-enable dynamic clock gating */
	intel_adsp_gpdma_clock_disable(dev);

	/* Release DMA ownership */
	intel_adsp_gpdma_release_ownership(dev);
#ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
	/* Power down */
	return intel_adsp_gpdma_disable(dev);
#else
	return 0;
#endif /* CONFIG_SOC_SERIES_INTEL_ADSP_ACE */
}
#endif /* CONFIG_PM_DEVICE */

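/*
 * dma_get_status() wrapper: combine the two 32-bit LLP counter halves into
 * the 64-bit total_copied value, then let the common DesignWare code fill in
 * the rest of the status.
 */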
int intel_adsp_gpdma_get_status(const struct device *dev, uint32_t channel, struct dma_status *stat)
{
	uint32_t llp_l = 0;
	uint32_t llp_u = 0;

	if (channel >= DW_MAX_CHAN) {
		return -EINVAL;
	}

	intel_adsp_gpdma_llp_read(dev, channel, &llp_l, &llp_u);
	stat->total_copied = ((uint64_t)llp_u << 32) | llp_l;

	return dw_dma_get_status(dev, channel, stat);
}

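/*
 * dma_get_attribute() wrapper: buffer address alignment follows the data
 * cache line size, buffer size and copy alignment come from the gpdma node
 * in the device tree, and the maximum block count is bounded by the size of
 * the DesignWare LLI pool.
 */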
int intel_adsp_gpdma_get_attribute(const struct device *dev, uint32_t type, uint32_t *value)
{
	switch (type) {
	case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT:
		*value = sys_cache_data_line_size_get();
		break;
	case DMA_ATTR_BUFFER_SIZE_ALIGNMENT:
		*value = DMA_BUF_SIZE_ALIGNMENT(DT_COMPAT_GET_ANY_STATUS_OKAY(intel_adsp_gpdma));
		break;
	case DMA_ATTR_COPY_ALIGNMENT:
		*value = DMA_COPY_ALIGNMENT(DT_COMPAT_GET_ANY_STATUS_OKAY(intel_adsp_gpdma));
		break;
	case DMA_ATTR_MAX_BLOCK_COUNT:
		*value = CONFIG_DMA_DW_LLI_POOL_SIZE;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

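/* Unmask the GPDMA interrupt in the interrupt-enable register of ACE_DINT[0];
 * the helper is a no-op on pre-ACE platforms.
 */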
#ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
static inline void ace_gpdma_intc_unmask(void)
{
	ACE_DINT[0].ie[ACE_INTL_GPDMA] = BIT(0);
}
#else
static inline void ace_gpdma_intc_unmask(void) {}
#endif

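/*
 * Device power management hook: RESUME powers the controller on, SUSPEND
 * powers it off when CONFIG_PM_DEVICE is enabled; TURN_ON and TURN_OFF are
 * accepted without doing any work here.
 */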
static int gpdma_pm_action(const struct device *dev, enum pm_device_action action)
{
	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		return intel_adsp_gpdma_power_on(dev);
	case PM_DEVICE_ACTION_SUSPEND:
#ifdef CONFIG_PM_DEVICE
		return intel_adsp_gpdma_power_off(dev);
#endif
	/* The ON and OFF actions are used only by the power domain to change the
	 * internal power status of the device. OFF means that the device and its
	 * power domain are disabled, while SUSPEND means that the device is
	 * powered off but its domain is already powered on.
	 */
	case PM_DEVICE_ACTION_TURN_ON:
	case PM_DEVICE_ACTION_TURN_OFF:
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}

int intel_adsp_gpdma_init(const struct device *dev)
{
	struct dw_dma_dev_data *const dev_data = dev->data;

	/* Setup context and atomics for channels */
	dev_data->dma_ctx.magic = DMA_MAGIC;
	dev_data->dma_ctx.dma_channels = DW_MAX_CHAN;
	dev_data->dma_ctx.atomic = dev_data->channels_atomic;

	ace_gpdma_intc_unmask();
	return pm_device_driver_init(dev, gpdma_pm_action);
}

static DEVICE_API(dma, intel_adsp_gpdma_driver_api) = {
	.config = intel_adsp_gpdma_config,
	.reload = intel_adsp_gpdma_copy,
	.start = intel_adsp_gpdma_start,
	.stop = intel_adsp_gpdma_stop,
	.suspend = dw_dma_suspend,
	.resume = dw_dma_resume,
	.get_status = intel_adsp_gpdma_get_status,
	.get_attribute = intel_adsp_gpdma_get_attribute,
};

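/* Per-instance channel arbitration data: every channel is placed in class 6
 * with weight 0.
 */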
#define INTEL_ADSP_GPDMA_CHAN_ARB_DATA(inst)				\
	static struct dw_drv_plat_data dmac##inst = {			\
		.chan[0] = {						\
			.class  = 6,					\
			.weight = 0,					\
		},							\
		.chan[1] = {						\
			.class  = 6,					\
			.weight = 0,					\
		},							\
		.chan[2] = {						\
			.class  = 6,					\
			.weight = 0,					\
		},							\
		.chan[3] = {						\
			.class  = 6,					\
			.weight = 0,					\
		},							\
		.chan[4] = {						\
			.class  = 6,					\
			.weight = 0,					\
		},							\
		.chan[5] = {						\
			.class  = 6,					\
			.weight = 0,					\
		},							\
		.chan[6] = {						\
			.class  = 6,					\
			.weight = 0,					\
		},							\
		.chan[7] = {						\
			.class  = 6,					\
			.weight = 0,					\
		},							\
	}

#define INTEL_ADSP_GPDMA_INIT(inst)					\
	INTEL_ADSP_GPDMA_CHAN_ARB_DATA(inst);				\
	static void intel_adsp_gpdma##inst##_irq_config(void);		\
									\
	static const struct intel_adsp_gpdma_cfg intel_adsp_gpdma##inst##_config = {\
		.dw_cfg = {						\
			.base = DT_INST_REG_ADDR(inst),			\
			.irq_config = intel_adsp_gpdma##inst##_irq_config,\
		},							\
		.shim = DT_INST_PROP_BY_IDX(inst, shim, 0),		\
	};								\
									\
	static struct intel_adsp_gpdma_data intel_adsp_gpdma##inst##_data = {\
		.dw_data = {						\
			.channel_data = &dmac##inst,			\
		},							\
	};								\
									\
	PM_DEVICE_DT_INST_DEFINE(inst, gpdma_pm_action);		\
									\
	DEVICE_DT_INST_DEFINE(inst,					\
			      &intel_adsp_gpdma_init,			\
			      PM_DEVICE_DT_INST_GET(inst),		\
			      &intel_adsp_gpdma##inst##_data,		\
			      &intel_adsp_gpdma##inst##_config, POST_KERNEL,\
			      CONFIG_DMA_INIT_PRIORITY,			\
			      &intel_adsp_gpdma_driver_api);		\
									\
	static void intel_adsp_gpdma##inst##_irq_config(void)		\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(inst),				\
			    DT_INST_IRQ(inst, priority), dw_dma_isr,	\
			    DEVICE_DT_INST_GET(inst),			\
			    DT_INST_IRQ(inst, sense));			\
		irq_enable(DT_INST_IRQN(inst));				\
	}

DT_INST_FOREACH_STATUS_OKAY(INTEL_ADSP_GPDMA_INIT)