// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2022 AMD. All rights reserved.
//
// Author: Basavaraj Hiregoudar <basavaraj.hiregoudar@amd.com>
//         Bala Kishore <balakishore.pati@amd.com>

#include <sof/audio/component.h>
#include <platform/chip_registers.h>
#include <platform/fw_scratch_mem.h>
#include <platform/chip_offset_byte.h>
#include <sof/drivers/acp_dai_dma.h>
#include <rtos/timer.h>
#include <rtos/alloc.h>
#include <sof/lib/dma.h>
#include <sof/lib/io.h>
#include <sof/lib/notifier.h>
#include <sof/lib/uuid.h>
#include <sof/math/numbers.h>
#include <sof/platform.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define ACP_MAX_STREAMS			8
#define ACP_DMA_BUFFER_PERIOD_COUNT	2
#define ACP_SYST_MEM_WINDOW		0x4000000

/* Need to look for proper uuid for amd platform */
DECLARE_SOF_UUID("acpdma", acpdma_uuid, 0x70f2d3f2, 0xcbb6, 0x4984,
		 0xa2, 0xd8, 0x0d, 0xd5, 0x14, 0xb8, 0x0b, 0xc2);
DECLARE_TR_CTX(acpdma_tr, SOF_UUID(acpdma_uuid), LOG_LEVEL_INFO);

struct acp_dma_config {
	/* base address of dma buffer */
	uint32_t base;
	/* size of dma buffer */
	uint32_t size;
	/* write pointer of dma buffer */
	uint32_t wr_ptr;
	/* read pointer of dma buffer */
	uint32_t rd_ptr;
	/* read size of dma buffer */
	uint32_t rd_size;
	/* write size of dma buffer */
	uint32_t wr_size;
	/* system memory size defined for the stream */
	uint32_t sys_buff_size;
	/* physical offset of the system memory buffer */
	uint32_t phy_off;
};

struct acp_dma_chan_data {
	/* channel index */
	uint32_t idx;
	/* stream direction */
	uint32_t dir;
	/* configuration data of dma */
	struct acp_dma_config config[ACP_MAX_STREAMS];
};

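/*
 * Copy dscr_count DMA descriptors from psrc_dscr into the shared descriptor
 * table pdest_dscr, starting at dscr_start_idx. Does nothing if any pointer
 * is NULL, the count is zero, or the start index is out of range.
 */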
static void dma_config_descriptor(uint32_t dscr_start_idx, uint32_t dscr_count,
				  acp_cfg_dma_descriptor_t *psrc_dscr,
				  acp_cfg_dma_descriptor_t *pdest_dscr)
{
	uint16_t dscr;

	if ((dscr_count) && (psrc_dscr) && (pdest_dscr) &&
	    (dscr_start_idx < MAX_NUM_DMA_DESC_DSCR)) {
		for (dscr = 0; dscr < dscr_count; dscr++) {
			pdest_dscr[dscr_start_idx + dscr].src_addr =
				psrc_dscr[dscr].src_addr;
			pdest_dscr[dscr_start_idx + dscr].dest_addr =
				psrc_dscr[dscr].dest_addr;
			pdest_dscr[dscr_start_idx + dscr].trns_cnt.u32All =
				psrc_dscr[dscr].trns_cnt.u32All;
		}
	}
}

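/*
 * Rebuild the DMA descriptors for the next block of "bytes" on a host <-> local
 * memory stream. Normally a single descriptor is programmed; when the transfer
 * would run past the end of the circular system buffer, a head/tail pair of
 * descriptors is used instead. The read/write offsets in the channel config
 * are advanced and the channel is left ready to start (COMP_STATE_PREPARE).
 */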
static void dma_reconfig(struct dma_chan_data *channel, uint32_t bytes)
{
	uint32_t strt_idx = 0;
	uint32_t src = 0;
	uint32_t dest = 0;
	uint32_t tail = 0, head = 0;
	uint32_t src1 = 0;
	uint32_t dest1 = 0;

	acp_cfg_dma_descriptor_t psrc_dscr[ACP_MAX_STREAMS];
	acp_cfg_dma_descriptor_t *pdest_dscr;
	acp_dma_cntl_0_t dma_cntl;
	struct acp_dma_chan_data *acp_dma_chan;
	struct acp_dma_config *dma_cfg;

	volatile acp_scratch_mem_config_t *pscratch_mem_cfg =
		(volatile acp_scratch_mem_config_t *)(PU_SCRATCH_REG_BASE + SCRATCH_REG_OFFSET);
	acp_dma_chan = dma_chan_get_data(channel);
	dma_cfg = &acp_dma_chan->config[channel->index];
	pdest_dscr = (acp_cfg_dma_descriptor_t *)(pscratch_mem_cfg->acp_cfg_dma_descriptor);
	if (channel->direction == DMA_DIR_HMEM_TO_LMEM) {
		head = bytes;
		/* Update the read and write pointers */
		dma_cfg->rd_ptr = ACP_SYST_MEM_WINDOW + dma_cfg->phy_off + dma_cfg->rd_size;
		dma_cfg->wr_ptr = dma_cfg->base + dma_cfg->wr_size;
		src = dma_cfg->rd_ptr;
		dest = dma_cfg->wr_ptr;
		psrc_dscr[strt_idx].src_addr = src;
		dest = (dest & ACP_DRAM_ADDRESS_MASK);
		/* Known Data hack */
		psrc_dscr[strt_idx].dest_addr = (dest | ACP_SRAM);
		psrc_dscr[strt_idx].trns_cnt.bits.trns_cnt = bytes;
		/* Configure a single descriptor */
		dma_config_descriptor(strt_idx, 1, psrc_dscr, pdest_dscr);
		dma_chan_reg_write(channel, ACP_DMA_DSCR_CNT_0, 1);
		/* Check for wrap-around case for system buffer */
		if (dma_cfg->rd_size + bytes > dma_cfg->sys_buff_size) {
			/* Configure descriptors with the head and tail
			 * values for the wrap-around case
			 */
			tail = dma_cfg->sys_buff_size - dma_cfg->rd_size;
			head = bytes - tail;
			psrc_dscr[strt_idx].trns_cnt.bits.trns_cnt = tail;
			psrc_dscr[strt_idx + 1].src_addr = ACP_SYST_MEM_WINDOW + dma_cfg->phy_off;
			dest1 = dest + tail;
			dest1 = (dest1 & ACP_DRAM_ADDRESS_MASK);
			psrc_dscr[strt_idx + 1].dest_addr = (dest1 | ACP_SRAM);
			psrc_dscr[strt_idx + 1].trns_cnt.bits.trns_cnt = head;
			dma_config_descriptor(strt_idx, 2, psrc_dscr, pdest_dscr);
			dma_chan_reg_write(channel, ACP_DMA_DSCR_CNT_0, 2);
			dma_cfg->rd_size = 0;
		}
		dma_cfg->rd_size += head;
		dma_cfg->rd_size %= dma_cfg->sys_buff_size;
		dma_cfg->wr_size += bytes;
		dma_cfg->wr_size %= dma_cfg->size;
	} else if (channel->direction == DMA_DIR_LMEM_TO_HMEM) {
		head = bytes;
		dma_cfg->wr_ptr = ACP_SYST_MEM_WINDOW + dma_cfg->phy_off + dma_cfg->wr_size;
		dma_cfg->rd_ptr = dma_cfg->base + dma_cfg->rd_size;
		src = dma_cfg->rd_ptr;
		dest = dma_cfg->wr_ptr;
		src = (src & ACP_DRAM_ADDRESS_MASK);
		psrc_dscr[strt_idx].src_addr = (src | ACP_SRAM);
		psrc_dscr[strt_idx].dest_addr = dest;
		psrc_dscr[strt_idx].trns_cnt.bits.trns_cnt = bytes;
		/* Configure a single descriptor */
		dma_config_descriptor(strt_idx, 1, psrc_dscr, pdest_dscr);
		dma_chan_reg_write(channel, ACP_DMA_DSCR_CNT_0, 1);
		/* Check for wrap-around case for system buffer */
		if (dma_cfg->wr_size + bytes > dma_cfg->sys_buff_size) {
			/* Configure descriptors with the head and tail
			 * values for the wrap-around case
			 */
			tail = dma_cfg->sys_buff_size - dma_cfg->wr_size;
			head = bytes - tail;
			psrc_dscr[strt_idx].trns_cnt.bits.trns_cnt = tail;
			src1 = src + tail;
			psrc_dscr[strt_idx + 1].dest_addr = ACP_SYST_MEM_WINDOW + dma_cfg->phy_off;
			psrc_dscr[strt_idx + 1].trns_cnt.bits.trns_cnt = head;
			src1 = (src1 & ACP_DRAM_ADDRESS_MASK);
			psrc_dscr[strt_idx + 1].src_addr = (src1 | ACP_SRAM);
			dma_config_descriptor(strt_idx, 2, psrc_dscr, pdest_dscr);
			dma_chan_reg_write(channel, ACP_DMA_DSCR_CNT_0, 2);
			dma_cfg->wr_size = 0;
		}
		dma_cfg->wr_size += head;
		dma_cfg->wr_size %= dma_cfg->sys_buff_size;
		dma_cfg->rd_size += bytes;
		dma_cfg->rd_size %= dma_cfg->size;
	}
	/* clear the dma channel control bits */
	dma_cntl = (acp_dma_cntl_0_t) dma_chan_reg_read(channel, ACP_DMA_CNTL_0);
	dma_cntl.bits.dmachrun = 0;
	dma_chan_reg_write(channel, ACP_DMA_CNTL_0, dma_cntl.u32all);
	/* Load start index of descriptor and priority */
	dma_chan_reg_write(channel, ACP_DMA_DSCR_STRT_IDX_0, strt_idx);
	dma_chan_reg_write(channel, ACP_DMA_PRIO_0, 1);
	channel->status = COMP_STATE_PREPARE;
}

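/*
 * Claim a free DMA channel. Fails if the requested index is out of range or
 * the channel is already in use; on success the channel is marked ready and
 * its read/write offsets are reset.
 */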
static struct dma_chan_data *acp_dma_channel_get(struct dma *dma,
						 unsigned int req_chan)
{
	k_spinlock_key_t key;
	struct dma_chan_data *channel;

	key = k_spin_lock(&dma->lock);
	if (req_chan >= dma->plat_data.channels) {
		k_spin_unlock(&dma->lock, key);
		tr_err(&acpdma_tr, "DMA: Channel %d not in range", req_chan);
		return NULL;
	}
	channel = &dma->chan[req_chan];
	if (channel->status != COMP_STATE_INIT) {
		k_spin_unlock(&dma->lock, key);
		tr_err(&acpdma_tr, "DMA: channel already in use %d", req_chan);
		return NULL;
	}
	atomic_add(&dma->num_channels_busy, 1);
	channel->status = COMP_STATE_READY;
	k_spin_unlock(&dma->lock, key);
	/* reset read and write pointers */
	struct acp_dma_chan_data *acp_dma_chan = dma_chan_get_data(channel);

	acp_dma_chan->config[req_chan].rd_size = 0;
	acp_dma_chan->config[req_chan].wr_size = 0;
	return channel;
}

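/* Release a previously acquired channel and reset its read/write offsets. */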
static void acp_dma_channel_put(struct dma_chan_data *channel)
{
	k_spinlock_key_t key;
	struct acp_dma_chan_data *acp_dma_chan = dma_chan_get_data(channel);

	key = k_spin_lock(&channel->dma->lock);
	channel->status = COMP_STATE_INIT;
	atomic_sub(&channel->dma->num_channels_busy, 1);
	k_spin_unlock(&channel->dma->lock, key);
	/* reset read and write pointer */
	acp_dma_chan->config[channel->index].rd_size = 0;
	acp_dma_chan->config[channel->index].wr_size = 0;
}

/* Stop the requested channel */
static int acp_dma_stop(struct dma_chan_data *channel)
{
	acp_dma_cntl_0_t dma_cntl;
	acp_dma_ch_sts_t ch_sts;
	uint32_t dmach_mask;
	uint32_t delay_cnt = 10000;

	switch (channel->status) {
	case COMP_STATE_READY:
	case COMP_STATE_PREPARE:
		return 0; /* do not try to stop multiple times */
	case COMP_STATE_PAUSED:
	case COMP_STATE_ACTIVE:
		break;
	default:
		return -EINVAL;
	}
	channel->status = COMP_STATE_READY;
	dmach_mask = (1 << channel->index);
	dma_cntl = (acp_dma_cntl_0_t) dma_chan_reg_read(channel, ACP_DMA_CNTL_0);
	/* Do the HW stop of the DMA:
	 * clear the run and IOC-enable bits to stop the transfer
	 */
	dma_cntl.bits.dmachrun = 0;
	dma_cntl.bits.dmachiocen = 0;
	dma_chan_reg_write(channel, ACP_DMA_CNTL_0, dma_cntl.u32all);
	ch_sts = (acp_dma_ch_sts_t) dma_reg_read(channel->dma, ACP_DMA_CH_STS);
	if (ch_sts.bits.dmachrunsts & dmach_mask) {
		/* set the reset bit for this channel to stop the dma transfer */
		dma_cntl.bits.dmachrst = 1;
		dma_chan_reg_write(channel, ACP_DMA_CNTL_0, dma_cntl.u32all);
	}

	while (delay_cnt > 0) {
		ch_sts = (acp_dma_ch_sts_t) dma_reg_read(channel->dma, ACP_DMA_CH_STS);
		if (!(ch_sts.bits.dmachrunsts & dmach_mask)) {
			/* clear the reset flag after successfully stopping the dma transfer
			 * and break from the loop
			 */
			dma_cntl.bits.dmachrst = 0;
			dma_chan_reg_write(channel, ACP_DMA_CNTL_0, dma_cntl.u32all);
			break;
		}
		delay_cnt--;
	}
	return 0;
}

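/*
 * Start the channel: set DMAChRun and then poll, up to a timeout, for the
 * channel run status to clear, i.e. for the programmed descriptors to
 * complete. The channel must be in the prepared (or suspended) state.
 */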
static int acp_dma_start(struct dma_chan_data *channel)
{
	acp_dma_cntl_0_t dma_cntl;
	acp_dma_ch_sts_t dma_sts;
	uint32_t chan_sts;
	int ret = 0;
	struct timer *timer = timer_get();
	uint64_t deadline = platform_timer_get(timer) +
		clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1) * 500 / 1000;

	if (channel->status != COMP_STATE_PREPARE &&
	    channel->status != COMP_STATE_SUSPEND)
		return -EINVAL;
	channel->status = COMP_STATE_ACTIVE;
	/* Clear DMAChRun before starting the DMA Ch */
	dma_cntl = (acp_dma_cntl_0_t) dma_chan_reg_read(channel, ACP_DMA_CNTL_0);
	dma_cntl.bits.dmachrun = 0;
	dma_cntl.bits.dmachiocen = 0;
	dma_chan_reg_write(channel, ACP_DMA_CNTL_0, dma_cntl.u32all);

	dma_cntl = (acp_dma_cntl_0_t) dma_chan_reg_read(channel, ACP_DMA_CNTL_0);
	dma_cntl.bits.dmachrun = 1;
	dma_cntl.bits.dmachiocen = 0;

	/* set dmachrun bit to start the transfer */
	dma_chan_reg_write(channel, ACP_DMA_CNTL_0, dma_cntl.u32all);

	/* poll for the status bit
	 * to finish the dma transfer
	 * then initiate call back function
	 */
	dma_sts = (acp_dma_ch_sts_t)dma_reg_read(channel->dma, ACP_DMA_CH_STS);
	chan_sts = dma_sts.u32all & (1 << channel->index);
	while (chan_sts) {
		if (deadline < platform_timer_get(timer)) {
			/* safe check in case we've got preempted after read */
			if (chan_sts)
				return 0;
			tr_err(&acpdma_tr, "acp-dma: timed out for dma start");
			return -ETIME;
		}
		dma_sts = (acp_dma_ch_sts_t)dma_reg_read(channel->dma, ACP_DMA_CH_STS);
		chan_sts = dma_sts.u32all & (1 << channel->index);
	}
	return ret;
}

static int acp_dma_release(struct dma_chan_data *channel)
{
	tr_info(&acpdma_tr, "DMA: release(%d)", channel->index);
	if (channel->status != COMP_STATE_PAUSED)
		return -EINVAL;
	channel->status = COMP_STATE_ACTIVE;
	return 0;
}

static int acp_dma_pause(struct dma_chan_data *channel)
{
	tr_info(&acpdma_tr, "h/w pause is not supported, changing the status of channel (%d)",
		channel->index);
	if (channel->status != COMP_STATE_ACTIVE)
		return -EINVAL;
	channel->status = COMP_STATE_PAUSED;
	return 0;
}

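/*
 * Copy "bytes" of data for the channel. For one-shot copies the previously
 * programmed descriptors are run to completion and the channel is stopped.
 * For stream channels (everything except the trace channel) the descriptors
 * are reconfigured for the next block and the transfer is run synchronously.
 * In both cases a DMA copy notification is then posted to the local core.
 */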
static int acp_dma_copy(struct dma_chan_data *channel, int bytes, uint32_t flags)
{
	struct dma_cb_data next = {
		.channel = channel,
		.elem.size = bytes,
	};
	acp_dma_ch_sts_t ch_sts;
	uint32_t dmach_mask = (1 << channel->index);
	int ret = 0;

	if (flags & DMA_COPY_ONE_SHOT) {
		ret = acp_dma_start(channel);
		if (ret < 0)
			return ret;
		ch_sts = (acp_dma_ch_sts_t) dma_reg_read(channel->dma, ACP_DMA_CH_STS);
		while (ch_sts.bits.dmachrunsts & dmach_mask) {
			ch_sts = (acp_dma_ch_sts_t) dma_reg_read(channel->dma, ACP_DMA_CH_STS);
			if (!(ch_sts.bits.dmachrunsts & dmach_mask))
				break;
		}
		ret = acp_dma_stop(channel);
	}
	/* Reconfigure dma descriptors for stream channels only */
	if (channel->index != DMA_TRACE_CHANNEL) {
		/* Reconfigure the dma descriptors for the next buffer of data after the callback */
		dma_reconfig(channel, bytes);
		/* Start the dma for the requested channel */
		acp_dma_start(channel);
		/* Stop the dma for the requested channel */
		acp_dma_stop(channel);
	}
	notifier_event(channel, NOTIFIER_ID_DMA_COPY,
		       NOTIFIER_TARGET_CORE_LOCAL, &next, sizeof(next));
	return ret;
}

static int acp_dma_status(struct dma_chan_data *channel,
			  struct dma_chan_status *status, uint8_t direction)
{
	status->state = channel->status;
	status->flags = 0;
	status->timestamp = timer_get_system(timer_get());
	return 0;
}

/* Some set_config helper functions */
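/*
 * Program the ACP DMA descriptor table in scratch memory from the
 * scatter-gather element array: playback (HMEM_TO_LMEM) descriptors copy from
 * the host system memory window into ACP SRAM, capture does the reverse.
 * Trace uses the descriptor slot reserved at DMA_TRACE_CHANNEL; stream
 * channels start at index zero and also record their buffer geometry in the
 * channel config. Finally the start index, descriptor count and priority
 * registers are programmed and the channel is marked prepared.
 */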
static int dma_setup(struct dma_chan_data *channel,
		     struct dma_sg_elem_array *sgelems, uint32_t dir)
{
	uint32_t dscr_cnt, dscr = 0;
	uint32_t tc;
	uint16_t dscr_strt_idx = 0;
	uint32_t *phy_off;
	uint32_t *syst_buff_size;
	uint32_t src;
	uint32_t dest;
	uint32_t buff_size = 0;
	acp_dma_cntl_0_t dma_cntl;
	struct acp_dma_config *dma_cfg;
	struct acp_dma_chan_data *acp_dma_chan = dma_chan_get_data(channel);

	volatile acp_scratch_mem_config_t *pscratch_mem_cfg =
		(volatile acp_scratch_mem_config_t *)(PU_SCRATCH_REG_BASE + SCRATCH_REG_OFFSET);
	dscr_cnt = sgelems->count;
	/* Trace uses the descriptor at index seven,
	 * other streams use descriptors from index zero
	 */
	if (channel->index == DMA_TRACE_CHANNEL)
		dscr_strt_idx = DMA_TRACE_CHANNEL;
	else
		dscr_strt_idx = 0;

	/* ACP DMA Descriptor in scratch memory */
	acp_cfg_dma_descriptor_t *dma_config_dscr;

	dma_config_dscr = (acp_cfg_dma_descriptor_t *)(pscratch_mem_cfg->acp_cfg_dma_descriptor);
	/* physical offset of system memory */
	phy_off = (uint32_t *)(pscratch_mem_cfg->phy_offset);
	/* size of system memory buffer */
	syst_buff_size = (uint32_t *)(pscratch_mem_cfg->syst_buff_size);
	for (dscr = 0; dscr < dscr_cnt; dscr++) {
		if (dir == DMA_DIR_HMEM_TO_LMEM) {
			if (channel->index != DMA_TRACE_CHANNEL)
				dma_config_dscr[dscr_strt_idx + dscr].src_addr =
					(phy_off[channel->index] + ACP_SYST_MEM_WINDOW + buff_size);
			else
				dma_config_dscr[dscr_strt_idx + dscr].src_addr =
					sgelems->elems[dscr].src + ACP_SYST_MEM_WINDOW;
			dest = sgelems->elems[dscr].dest;
			dest = (dest & ACP_DRAM_ADDRESS_MASK);
			dma_config_dscr[dscr_strt_idx + dscr].dest_addr = (dest | ACP_SRAM);
			dma_config_dscr[dscr_strt_idx + dscr].trns_cnt.u32All = 0;
			dma_config_dscr[dscr_strt_idx + dscr].trns_cnt.bits.trns_cnt =
				sgelems->elems[dscr].size;
		} else {
			if (channel->index != DMA_TRACE_CHANNEL)
				dma_config_dscr[dscr_strt_idx + dscr].dest_addr =
					(phy_off[channel->index] + ACP_SYST_MEM_WINDOW + buff_size);
			else
				dma_config_dscr[dscr_strt_idx + dscr].dest_addr =
					(sgelems->elems[dscr].dest + ACP_SYST_MEM_WINDOW);
			src = sgelems->elems[dscr].src;
			src = (src & ACP_DRAM_ADDRESS_MASK);
			dma_config_dscr[dscr_strt_idx + dscr].src_addr =
				(src | ACP_SRAM); /* rembrandt-arch */
			dma_config_dscr[dscr_strt_idx + dscr].trns_cnt.u32All = 0;
			dma_config_dscr[dscr_strt_idx + dscr].trns_cnt.bits.trns_cnt =
				sgelems->elems[dscr].size;
		}
		dma_config_dscr[dscr_strt_idx + dscr].trns_cnt.u32All = 0;
		dma_config_dscr[dscr_strt_idx + dscr].trns_cnt.bits.trns_cnt =
			sgelems->elems[dscr].size;
		buff_size = sgelems->elems[dscr].size;
	}
	dma_config_dscr[dscr_strt_idx + (dscr - 1)].trns_cnt.bits.ioc = 0;
	dma_cfg = &acp_dma_chan->config[channel->index];
	/* bytes of data to be transferred for the dma */
	tc = dma_config_dscr[dscr_strt_idx].trns_cnt.bits.trns_cnt;
	/* DMA configuration for stream */
	if (channel->index != DMA_TRACE_CHANNEL) {
		acp_dma_chan->dir = dir;
		acp_dma_chan->idx = channel->index;
		dma_cfg->phy_off = phy_off[channel->index];
		dma_cfg->size = tc * dscr_cnt;
		dma_cfg->sys_buff_size = syst_buff_size[channel->index];

		if (dir == DMA_DIR_HMEM_TO_LMEM) {
			/* Playback */
			dma_config_dscr[dscr_strt_idx].dest_addr =
				(dma_config_dscr[dscr_strt_idx].dest_addr & ACP_DRAM_ADDRESS_MASK);
			dma_cfg->base = dma_config_dscr[dscr_strt_idx].dest_addr | ACP_SRAM;
			dma_cfg->wr_size = 0;
			dma_cfg->rd_size = dma_cfg->size;
		} else {
			/* Capture */
			dma_config_dscr[dscr_strt_idx].src_addr =
				(dma_config_dscr[dscr_strt_idx].src_addr & ACP_DRAM_ADDRESS_MASK);
			dma_cfg->base = dma_config_dscr[dscr_strt_idx].src_addr | ACP_SRAM;
			dma_cfg->wr_size = dma_cfg->size;
			dma_cfg->rd_size = 0;
		}
	}
	/* clear the dma channel control bits */
	dma_cntl = (acp_dma_cntl_0_t) dma_chan_reg_read(channel, ACP_DMA_CNTL_0);
	dma_cntl.bits.dmachrun = 0;
	dma_cntl.bits.dmachiocen = 0;
	dma_chan_reg_write(channel, ACP_DMA_CNTL_0, dma_cntl.u32all);

	/* Program DMAChDscrStrIdx to the index
	 * number of the first descriptor to be processed.
	 */
	dma_chan_reg_write(channel, ACP_DMA_DSCR_STRT_IDX_0, dscr_strt_idx);
	/* Program DMAChDscrCnt with the number of
	 * descriptors to be processed in the transfer
	 */
	dma_chan_reg_write(channel, ACP_DMA_DSCR_CNT_0, dscr_cnt);
	/* set DMAChPrioLvl according to the priority */
	dma_chan_reg_write(channel, ACP_DMA_PRIO_0, 1);
	channel->status = COMP_STATE_PREPARE;
	return 0;
}

/* set the DMA channel configuration, source/target address, buffer sizes */
static int acp_dma_set_config(struct dma_chan_data *channel,
			      struct dma_sg_config *config)
{
	uint32_t dir;

	channel->direction = config->direction;
	dir = config->direction;
	if (config->cyclic) {
		tr_err(&acpdma_tr,
		       "DMA: cyclic configurations are not supported");
		return -EINVAL;
	}
	if (config->scatter) {
		tr_err(&acpdma_tr,
		       "DMA: scatter is not supported Chan.Index %d scatter %d",
		       channel->index, config->scatter);
		return -EINVAL;
	}
	return dma_setup(channel, &config->elem_array, dir);
}

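/* Allocate and initialize per-channel state for the ACP DMA controller. */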
static int acp_dma_probe(struct dma *dma)
{
	struct acp_dma_chan_data *acp_dma_chan;
	int channel;

	if (dma->chan) {
		tr_err(&acpdma_tr, "DMA: already probed");
		return -EEXIST;
	}
	dma->chan = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM,
			    dma->plat_data.channels *
			    sizeof(struct dma_chan_data));
	if (!dma->chan) {
		tr_err(&acpdma_tr, "DMA: unable to allocate channel context");
		return -ENOMEM;
	}
	for (channel = 0; channel < dma->plat_data.channels; channel++) {
		dma->chan[channel].dma = dma;
		dma->chan[channel].index = channel;
		dma->chan[channel].status = COMP_STATE_INIT;
		acp_dma_chan = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0,
				       SOF_MEM_CAPS_RAM,
				       sizeof(struct acp_dma_chan_data));
		if (!acp_dma_chan) {
			rfree(dma->chan);
			tr_err(&acpdma_tr, "acp-dma: %d channel %d private data alloc failed",
			       dma->plat_data.id, channel);
			return -ENOMEM;
		}
		dma_chan_set_data(&dma->chan[channel], acp_dma_chan);
	}
	return 0;
}

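/* Free the per-channel private data and the channel array allocated in probe. */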
static int acp_dma_remove(struct dma *dma)
{
	int channel;

	if (!dma->chan) {
		tr_err(&acpdma_tr, "DMA: Invalid remove call");
		return 0;
	}
	for (channel = 0; channel < dma->plat_data.channels; channel++)
		rfree(dma->chan[channel].priv_data);
	rfree(dma->chan);
	dma->chan = NULL;

	return 0;
}

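/*
 * Per-channel interrupt control: query, clear, mask or unmask the channel bit
 * in the ACP DSP0 interrupt status/control registers.
 */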
static int acp_dma_interrupt(struct dma_chan_data *channel, enum dma_irq_cmd cmd)
{
	uint32_t status;

	if (channel->status == COMP_STATE_INIT)
		return 0;

	switch (cmd) {
	case DMA_IRQ_STATUS_GET:
		status = dma_reg_read(channel->dma, ACP_DSP0_INTR_STAT) & 0xFF;
		return status & (1 << channel->index);
	case DMA_IRQ_CLEAR:
		status = dma_reg_read(channel->dma, ACP_DSP0_INTR_STAT);
		status = status & (1 << channel->index);
		dma_reg_write(channel->dma, ACP_DSP0_INTR_STAT, status);
		return 0;
	case DMA_IRQ_MASK:
		status = dma_reg_read(channel->dma, ACP_DSP0_INTR_CNTL);
		status = status & (~(1 << channel->index));
		dma_reg_write(channel->dma, ACP_DSP0_INTR_CNTL, status);
		return 0;
	case DMA_IRQ_UNMASK:
		status = dma_reg_read(channel->dma, ACP_DSP0_INTR_CNTL);
		status = status | (1 << channel->index);
		dma_reg_write(channel->dma, ACP_DSP0_INTR_CNTL, status);
		return 0;
	default:
		return -EINVAL;
	}
}

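/* Report DMA buffer alignment, address alignment and period count attributes. */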
static int acp_dma_get_attribute(struct dma *dma, uint32_t type, uint32_t *value)
{
	switch (type) {
	case DMA_ATTR_BUFFER_ALIGNMENT:
	case DMA_ATTR_COPY_ALIGNMENT:
		*value = ACP_DMA_BUFFER_ALIGN_128;
		break;
	case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT:
		*value = PLATFORM_DCACHE_ALIGN;
		break;
	case DMA_ATTR_BUFFER_PERIOD_COUNT:
		*value = ACP_DMA_BUFFER_PERIOD_COUNT;
		break;
	default:
		return -ENOENT; /* Attribute not found */
	}
	return 0;
}

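/*
 * Report available/free bytes for the channel: half of the configured DMA
 * buffer size, depending on the stream direction.
 */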
static int acp_dma_get_data_size(struct dma_chan_data *channel,
				 uint32_t *avail, uint32_t *free)
{
	struct acp_dma_chan_data *acp_dma_chan = dma_chan_get_data(channel);
	uint32_t data_size = 0;
	uint32_t tc = 0; /* transfer count in bytes */

	tc = acp_dma_chan->config[channel->index].size;
	data_size = (uint32_t)tc;
	switch (channel->direction) {
	case DMA_DIR_MEM_TO_DEV:
	case DMA_DIR_HMEM_TO_LMEM:
		*avail = ABS(data_size) / 2;
		break;
	case DMA_DIR_DEV_TO_MEM:
	case DMA_DIR_LMEM_TO_HMEM:
		*free = ABS(data_size) / 2;
		break;
	default:
		tr_err(&acpdma_tr, "dma_get_data_size() Invalid direction %d",
		       channel->direction);
		return -EINVAL;
	}
	return 0;
}

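/* DMA operations exposed to the SOF DMA core for the AMD ACP DMA controller */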
const struct dma_ops acp_dma_ops = {
	.channel_get = acp_dma_channel_get,
	.channel_put = acp_dma_channel_put,
	.start = acp_dma_start,
	.stop = acp_dma_stop,
	.pause = acp_dma_pause,
	.release = acp_dma_release,
	.copy = acp_dma_copy,
	.status = acp_dma_status,
	.set_config = acp_dma_set_config,
	.probe = acp_dma_probe,
	.remove = acp_dma_remove,
	.interrupt = acp_dma_interrupt,
	.get_attribute = acp_dma_get_attribute,
	.get_data_size = acp_dma_get_data_size,
};