// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2021 AMD. All rights reserved.
//
// Author: Basavaraj Hiregoudar <basavaraj.hiregoudar@amd.com>
//         Anup Kulkarni <anup.kulkarni@amd.com>
//         Bala Kishore <balakishore.pati@amd.com>

#include <sof/audio/component.h>
#include <platform/chip_registers.h>
#include <platform/fw_scratch_mem.h>
#include <platform/chip_offset_byte.h>
#include <sof/drivers/acp_dai_dma.h>
#include <sof/drivers/timer.h>
#include <sof/lib/alloc.h>
#include <sof/lib/dma.h>
#include <sof/lib/io.h>
#include <sof/lib/notifier.h>
#include <sof/lib/uuid.h>
#include <sof/math/numbers.h>
#include <sof/platform.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define ACP_MAX_STREAMS			8
#define ACP_DMA_BUFFER_PERIOD_COUNT	2
#define ACP_SYST_MEM_WINDOW		0x4000000
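
/* ACP_SYST_MEM_WINDOW is the DSP-visible base of the host system memory
 * window: host buffer offsets (phy_off) are added to it when descriptors
 * are built below. ACP_DMA_BUFFER_PERIOD_COUNT of 2 reflects the
 * double-buffered scheme assumed by acp_dma_get_data_size().
 */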

/* Need to look for a proper UUID for the AMD platform */
DECLARE_SOF_UUID("acpdma", acpdma_uuid, 0x70f2d3f2, 0xcbb6, 0x4984,
		 0xa2, 0xd8, 0x0d, 0xd5, 0x14, 0xb8, 0x0b, 0xc2);
DECLARE_TR_CTX(acpdma_tr, SOF_UUID(acpdma_uuid), LOG_LEVEL_INFO);

struct acp_dma_config {
	/* base address of dma buffer */
	uint32_t base;
	/* size of dma buffer */
	uint32_t size;
	/* write pointer of dma buffer */
	uint32_t wr_ptr;
	/* read pointer of dma buffer */
	uint32_t rd_ptr;
	/* read size of dma buffer */
	uint32_t rd_size;
	/* write size of dma buffer */
	uint32_t wr_size;
	/* system memory size defined for the stream */
	uint32_t sys_buff_size;
	/* virtual system memory offset for system memory buffer */
	uint32_t phy_off;
};

struct acp_dma_chan_data {
	/* channel index */
	uint32_t idx;
	/* stream direction */
	uint32_t dir;
	/* configuration data of dma */
	struct acp_dma_config config[ACP_MAX_STREAMS];
};

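/* Copy dscr_count DMA descriptors from psrc_dscr into the scratch-memory
 * descriptor table pdest_dscr, starting at table index dscr_start_idx.
 */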
static void dma_config_descriptor(uint32_t dscr_start_idx, uint32_t dscr_count,
				  acp_cfg_dma_descriptor_t *psrc_dscr,
				  acp_cfg_dma_descriptor_t *pdest_dscr)
{
	uint16_t dscr;

	if ((dscr_count) && (psrc_dscr) && (pdest_dscr) &&
	    (dscr_start_idx < MAX_NUM_DMA_DESC_DSCR)) {
		for (dscr = 0; dscr < dscr_count; dscr++) {
			pdest_dscr[dscr_start_idx + dscr].src_addr =
				psrc_dscr[dscr].src_addr;
			pdest_dscr[dscr_start_idx + dscr].dest_addr =
				psrc_dscr[dscr].dest_addr;
			pdest_dscr[dscr_start_idx + dscr].trns_cnt.u32All =
				psrc_dscr[dscr].trns_cnt.u32All;
		}
	}
}

static void dma_reconfig(struct dma_chan_data *channel, uint32_t bytes)
{
	uint32_t strt_idx = 0;
	uint32_t src;
	uint32_t dest;
	uint32_t tail, head;

	acp_cfg_dma_descriptor_t psrc_dscr[ACP_MAX_STREAMS];
	acp_cfg_dma_descriptor_t *pdest_dscr;
	acp_dma_cntl_0_t dma_cntl;
	struct acp_dma_chan_data *acp_dma_chan;
	struct acp_dma_config *dma_cfg;

	volatile acp_scratch_mem_config_t *pscratch_mem_cfg =
		(volatile acp_scratch_mem_config_t *)(PU_REGISTER_BASE + SCRATCH_REG_OFFSET);
	acp_dma_chan = dma_chan_get_data(channel);
	dma_cfg = &acp_dma_chan->config[channel->index];
	pdest_dscr = (acp_cfg_dma_descriptor_t *)(pscratch_mem_cfg->acp_cfg_dma_descriptor);
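	/* Rebuild the descriptor(s) for the next chunk of data: a single
	 * descriptor in the common case, or two (tail + head) when the
	 * transfer wraps around the end of the host system buffer.
	 */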
	if (channel->direction == DMA_DIR_HMEM_TO_LMEM) {
		head = bytes;
		/* Update the read and write pointers */
		dma_cfg->rd_ptr = ACP_SYST_MEM_WINDOW + dma_cfg->phy_off + dma_cfg->rd_size;
		dma_cfg->wr_ptr = dma_cfg->base + dma_cfg->wr_size;
		src = dma_cfg->rd_ptr;
		dest = dma_cfg->wr_ptr;
		psrc_dscr[strt_idx].src_addr = src;
		psrc_dscr[strt_idx].dest_addr = dest;
		psrc_dscr[strt_idx].trns_cnt.bits.trns_cnt = bytes;
		/* Configure a single descriptor */
		dma_config_descriptor(strt_idx, 1, psrc_dscr, pdest_dscr);
		dma_chan_reg_write(channel, ACP_DMA_DSCR_CNT_0, 1);
		/* Check for wrap-around case for system buffer */
		if (dma_cfg->rd_size + bytes > dma_cfg->sys_buff_size) {
			/* Configure the descriptors with head and tail
			 * values for the wrap-around case
			 */
			tail = dma_cfg->sys_buff_size - dma_cfg->rd_size;
			head = bytes - tail;
			psrc_dscr[strt_idx].trns_cnt.bits.trns_cnt = tail;
			psrc_dscr[strt_idx + 1].src_addr = ACP_SYST_MEM_WINDOW + dma_cfg->phy_off;
			psrc_dscr[strt_idx + 1].dest_addr = dest + tail;
			psrc_dscr[strt_idx + 1].trns_cnt.bits.trns_cnt = head;
			dma_config_descriptor(strt_idx, 2, psrc_dscr, pdest_dscr);
			dma_chan_reg_write(channel, ACP_DMA_DSCR_CNT_0, 2);
			dma_cfg->rd_size = 0;
		}
		dma_cfg->rd_size += head;
		dma_cfg->rd_size %= dma_cfg->sys_buff_size;
		dma_cfg->wr_size += bytes;
		dma_cfg->wr_size %= dma_cfg->size;
	} else if (channel->direction == DMA_DIR_LMEM_TO_HMEM) {
		head = bytes;
		dma_cfg->wr_ptr = ACP_SYST_MEM_WINDOW + dma_cfg->phy_off + dma_cfg->wr_size;
		dma_cfg->rd_ptr = dma_cfg->base + dma_cfg->rd_size;
		src = dma_cfg->rd_ptr;
		dest = dma_cfg->wr_ptr;
		psrc_dscr[strt_idx].src_addr = src;
		psrc_dscr[strt_idx].dest_addr = dest;
		psrc_dscr[strt_idx].trns_cnt.bits.trns_cnt = bytes;
		/* Configure a single descriptor */
		dma_config_descriptor(strt_idx, 1, psrc_dscr, pdest_dscr);
		dma_chan_reg_write(channel, ACP_DMA_DSCR_CNT_0, 1);
		/* Check for wrap-around case for system buffer */
		if (dma_cfg->wr_size + bytes > dma_cfg->sys_buff_size) {
			/* Configure the descriptors with head and tail
			 * values for the wrap-around case
			 */
			tail = dma_cfg->sys_buff_size - dma_cfg->wr_size;
			head = bytes - tail;
			psrc_dscr[strt_idx].trns_cnt.bits.trns_cnt = tail;
			psrc_dscr[strt_idx + 1].src_addr = src + tail;
			psrc_dscr[strt_idx + 1].dest_addr = ACP_SYST_MEM_WINDOW + dma_cfg->phy_off;
			psrc_dscr[strt_idx + 1].trns_cnt.bits.trns_cnt = head;
			dma_config_descriptor(strt_idx, 2, psrc_dscr, pdest_dscr);
			dma_chan_reg_write(channel, ACP_DMA_DSCR_CNT_0, 2);
			dma_cfg->wr_size = 0;
		}
		dma_cfg->wr_size += head;
		dma_cfg->wr_size %= dma_cfg->sys_buff_size;
		dma_cfg->rd_size += bytes;
		dma_cfg->rd_size %= dma_cfg->size;
	}
	/* clear the dma channel control bits */
	dma_cntl = (acp_dma_cntl_0_t) dma_chan_reg_read(channel, ACP_DMA_CNTL_0);
	dma_cntl.bits.dmachrun = 0;
	dma_chan_reg_write(channel, ACP_DMA_CNTL_0, dma_cntl.u32all);
	/* Load the descriptor start index and the channel priority */
	dma_chan_reg_write(channel, ACP_DMA_DSCR_STRT_IDX_0, strt_idx);
	dma_chan_reg_write(channel, ACP_DMA_PRIO_0, 1);
	channel->status = COMP_STATE_PREPARE;
}

static struct dma_chan_data *acp_dma_channel_get(struct dma *dma,
						 unsigned int req_chan)
{
	uint32_t flags;
	struct dma_chan_data *channel;

	spin_lock_irq(&dma->lock, flags);
	if (req_chan >= dma->plat_data.channels) {
		spin_unlock_irq(&dma->lock, flags);
		tr_err(&acpdma_tr, "DMA: Channel %d not in range", req_chan);
		return NULL;
	}
	channel = &dma->chan[req_chan];
	if (channel->status != COMP_STATE_INIT) {
		spin_unlock_irq(&dma->lock, flags);
		tr_err(&acpdma_tr, "DMA: channel already in use %d", req_chan);
		return NULL;
	}
	atomic_add(&dma->num_channels_busy, 1);
	channel->status = COMP_STATE_READY;
	spin_unlock_irq(&dma->lock, flags);
	/* reset read and write pointers */
	struct acp_dma_chan_data *acp_dma_chan = dma_chan_get_data(channel);

	acp_dma_chan->config[req_chan].rd_size = 0;
	acp_dma_chan->config[req_chan].wr_size = 0;
	return channel;
}

static void acp_dma_channel_put(struct dma_chan_data *channel)
{
	uint32_t flags;

	spin_lock_irq(&channel->dma->lock, flags);
	channel->status = COMP_STATE_INIT;
	atomic_sub(&channel->dma->num_channels_busy, 1);
	spin_unlock_irq(&channel->dma->lock, flags);
	/* reset read and write pointers */
	struct acp_dma_chan_data *acp_dma_chan = dma_chan_get_data(channel);

	acp_dma_chan->config[channel->index].rd_size = 0;
	acp_dma_chan->config[channel->index].wr_size = 0;
}

/* Stop the requested channel */
static int acp_dma_stop(struct dma_chan_data *channel)
{
	acp_dma_cntl_0_t dma_cntl;
	acp_dma_ch_sts_t ch_sts;
	uint32_t dmach_mask;
	uint32_t delay_cnt = 10000;

	switch (channel->status) {
	case COMP_STATE_READY:
	case COMP_STATE_PREPARE:
		return 0; /* do not try to stop multiple times */
	case COMP_STATE_PAUSED:
	case COMP_STATE_ACTIVE:
		break;
	default:
		return -EINVAL;
	}
	channel->status = COMP_STATE_READY;
	dmach_mask = (1 << channel->index);
	dma_cntl = (acp_dma_cntl_0_t) dma_chan_reg_read(channel, ACP_DMA_CNTL_0);
	/* Stop the channel in hardware: clear the run and IOC-enable bits */
	dma_cntl.bits.dmachrun = 0;
	dma_cntl.bits.dmachiocen = 0;
	dma_chan_reg_write(channel, ACP_DMA_CNTL_0, dma_cntl.u32all);
	ch_sts = (acp_dma_ch_sts_t) dma_reg_read(channel->dma, ACP_DMA_CH_STS);
	if (ch_sts.bits.dmachrunsts & dmach_mask) {
		/* set the reset bit for this channel to stop the dma transfer */
		dma_cntl.bits.dmachrst = 1;
		dma_chan_reg_write(channel, ACP_DMA_CNTL_0, dma_cntl.u32all);
	}
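	/* Poll until the hardware reports the channel has stopped, then clear
	 * the reset bit; the loop gives up silently after delay_cnt reads.
	 */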
	while (delay_cnt > 0) {
		ch_sts = (acp_dma_ch_sts_t) dma_reg_read(channel->dma, ACP_DMA_CH_STS);
		if (!(ch_sts.bits.dmachrunsts & dmach_mask)) {
			/* clear the reset flag after successfully stopping the dma transfer
			 * and break from the loop
			 */
			dma_cntl.bits.dmachrst = 0;
			dma_chan_reg_write(channel, ACP_DMA_CNTL_0, dma_cntl.u32all);
			break;
		}
		delay_cnt--;
	}
	return 0;
}

static int acp_dma_start(struct dma_chan_data *channel)
{
	acp_dma_cntl_0_t dma_cntl;
	acp_dma_ch_sts_t dma_sts;
	uint32_t chan_sts;
	int ret = 0;

	struct timer *timer = timer_get();
	uint64_t deadline = platform_timer_get(timer) +
		clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1) * 500 / 1000;
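
	/* deadline is ~500 microseconds from now (1 ms worth of ticks scaled
	 * by 500/1000); it bounds the run-status poll loop below
	 */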
	if (channel->status != COMP_STATE_PREPARE &&
	    channel->status != COMP_STATE_SUSPEND)
		return -EINVAL;
	channel->status = COMP_STATE_ACTIVE;
	/* Clear DMAChRun before starting the DMA Ch */
	dma_cntl = (acp_dma_cntl_0_t) dma_chan_reg_read(channel, ACP_DMA_CNTL_0);
	dma_cntl.bits.dmachrun = 0;
	dma_cntl.bits.dmachiocen = 0;
	dma_chan_reg_write(channel, ACP_DMA_CNTL_0, dma_cntl.u32all);

	dma_cntl = (acp_dma_cntl_0_t) dma_chan_reg_read(channel, ACP_DMA_CNTL_0);
	dma_cntl.bits.dmachrun = 1;
	dma_cntl.bits.dmachiocen = 0;

	/* set dmachrun bit to start the transfer */
	dma_chan_reg_write(channel, ACP_DMA_CNTL_0, dma_cntl.u32all);

	/* poll for the run-status bit to clear, i.e. for the dma transfer
	 * to finish, before returning
	 */
	dma_sts = (acp_dma_ch_sts_t)dma_reg_read(channel->dma, ACP_DMA_CH_STS);
	chan_sts = dma_sts.u32all & (1 << channel->index);
	while (chan_sts) {
		if (deadline < platform_timer_get(timer)) {
			/* re-read in case we were preempted after the last read */
			dma_sts = (acp_dma_ch_sts_t)dma_reg_read(channel->dma, ACP_DMA_CH_STS);
			chan_sts = dma_sts.u32all & (1 << channel->index);
			if (!chan_sts)
				return 0;
			tr_err(&acpdma_tr, "acp-dma: timed out for dma start");
			return -ETIME;
		}
		dma_sts = (acp_dma_ch_sts_t)dma_reg_read(channel->dma, ACP_DMA_CH_STS);
		chan_sts = dma_sts.u32all & (1 << channel->index);
	}
	return ret;
}

static int acp_dma_release(struct dma_chan_data *channel)
{
	tr_info(&acpdma_tr, "DMA: release(%d)", channel->index);
	if (channel->status != COMP_STATE_PAUSED)
		return -EINVAL;
	channel->status = COMP_STATE_ACTIVE;
	return 0;
}

static int acp_dma_pause(struct dma_chan_data *channel)
{
	tr_info(&acpdma_tr, "h/w pause is not supported, changing the status of channel (%d)",
		channel->index);
	if (channel->status != COMP_STATE_ACTIVE)
		return -EINVAL;
	channel->status = COMP_STATE_PAUSED;
	return 0;
}

static int acp_dma_copy(struct dma_chan_data *channel, int bytes, uint32_t flags)
{
	struct dma_cb_data next = {
		.channel = channel,
		.elem.size = bytes,
	};
	acp_dma_ch_sts_t ch_sts;
	uint32_t dmach_mask = (1 << channel->index);
	int ret = 0;

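	/* A one-shot copy runs synchronously: start the channel, busy-wait
	 * until its run status clears, then stop it.
	 */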
	if (flags & DMA_COPY_ONE_SHOT) {
		ret = acp_dma_start(channel);
		if (ret < 0)
			return ret;
		ch_sts = (acp_dma_ch_sts_t) dma_reg_read(channel->dma, ACP_DMA_CH_STS);
		while (ch_sts.bits.dmachrunsts & dmach_mask) {
			ch_sts = (acp_dma_ch_sts_t) dma_reg_read(channel->dma, ACP_DMA_CH_STS);
			if (!(ch_sts.bits.dmachrunsts & dmach_mask))
				break;
		}
		ret = acp_dma_stop(channel);
	}
	notifier_event(channel, NOTIFIER_ID_DMA_COPY,
		       NOTIFIER_TARGET_CORE_LOCAL, &next, sizeof(next));
	/* Reconfigure dma descriptors for stream channels only */
	if (channel->index != DMA_TRACE_CHANNEL) {
		/* Reconfigure the dma descriptors for the next buffer of data after the callback */
		dma_reconfig(channel, bytes);
		/* Start the dma for the requested channel */
		acp_dma_start(channel);
		/* Stop the dma for the requested channel */
		acp_dma_stop(channel);
	}
	return ret;
}

static int acp_dma_status(struct dma_chan_data *channel,
			  struct dma_chan_status *status, uint8_t direction)
{
	status->state = channel->status;
	status->flags = 0;
	status->timestamp = timer_get_system(timer_get());
	return 0;
}

/* Some set_config helper functions */
static int dma_setup(struct dma_chan_data *channel,
		     struct dma_sg_elem_array *sgelems, uint32_t dir)
{
	uint32_t dscr_cnt, dscr = 0;
	uint32_t tc;
	uint16_t dscr_strt_idx = 0;
	uint32_t *phy_off;
	uint32_t *syst_buff_size;
	acp_dma_cntl_0_t dma_cntl;
	struct acp_dma_config *dma_cfg;
	struct acp_dma_chan_data *acp_dma_chan = dma_chan_get_data(channel);

	volatile acp_scratch_mem_config_t *pscratch_mem_cfg =
		(volatile acp_scratch_mem_config_t *)(PU_REGISTER_BASE + SCRATCH_REG_OFFSET);
	dscr_cnt = sgelems->count;
	/* Trace uses descriptors starting at index seven;
	 * other streams use descriptors starting at index zero.
	 */
	if (channel->index == DMA_TRACE_CHANNEL)
		dscr_strt_idx = DMA_TRACE_CHANNEL;
	else
		dscr_strt_idx = 0;
	/* ACP DMA descriptors in scratch memory */
	acp_cfg_dma_descriptor_t *dma_config_dscr;

	dma_config_dscr = (acp_cfg_dma_descriptor_t *)(pscratch_mem_cfg->acp_cfg_dma_descriptor);
	/* physical offset of system memory */
	phy_off = (uint32_t *)(pscratch_mem_cfg->phy_offset);
	/* size of system memory buffer */
	syst_buff_size = (uint32_t *)(pscratch_mem_cfg->syst_buff_size);

	for (dscr = 0; dscr < dscr_cnt; dscr++) {
		if (dir == DMA_DIR_HMEM_TO_LMEM) {
			dma_config_dscr[dscr_strt_idx + dscr].src_addr =
				sgelems->elems[dscr].src + ACP_SYST_MEM_WINDOW;
			dma_config_dscr[dscr_strt_idx + dscr].dest_addr =
				sgelems->elems[dscr].dest & ACP_DRAM_ADDRESS_MASK;
		} else {
			dma_config_dscr[dscr_strt_idx + dscr].dest_addr =
				sgelems->elems[dscr].dest + ACP_SYST_MEM_WINDOW;
			dma_config_dscr[dscr_strt_idx + dscr].src_addr =
				sgelems->elems[dscr].src & ACP_DRAM_ADDRESS_MASK;
		}
		dma_config_dscr[dscr_strt_idx + dscr].trns_cnt.u32All = 0;
		dma_config_dscr[dscr_strt_idx + dscr].trns_cnt.bits.trns_cnt =
			sgelems->elems[dscr].size;
	}
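	/* Interrupt-on-complete is not requested on the last descriptor;
	 * completion is detected by polling the channel run status instead.
	 */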
	dma_config_dscr[dscr_strt_idx + (dscr - 1)].trns_cnt.bits.ioc = 0;
	dma_cfg = &acp_dma_chan->config[channel->index];
	/* bytes of data to be transferred for the dma */
	tc = dma_config_dscr[dscr_strt_idx].trns_cnt.bits.trns_cnt;
	/* DMA configuration for stream */
	if (channel->index != DMA_TRACE_CHANNEL) {
		acp_dma_chan->dir = dir;
		acp_dma_chan->idx = channel->index;
		dma_cfg->phy_off = phy_off[channel->index];
		dma_cfg->size = tc * dscr_cnt;
		dma_cfg->sys_buff_size = syst_buff_size[channel->index];

		if (dir == DMA_DIR_HMEM_TO_LMEM) {
			/* Playback */
			dma_cfg->base = dma_config_dscr[dscr_strt_idx].dest_addr;
			dma_cfg->wr_size = 0;
			dma_cfg->rd_size = dma_cfg->size;
		} else {
			/* Capture */
			dma_cfg->base = dma_config_dscr[dscr_strt_idx].src_addr;
			dma_cfg->wr_size = dma_cfg->size;
			dma_cfg->rd_size = 0;
		}
	}
	/* clear the dma channel control bits */
	dma_cntl = (acp_dma_cntl_0_t) dma_chan_reg_read(channel, ACP_DMA_CNTL_0);
	dma_cntl.bits.dmachrun = 0;
	dma_cntl.bits.dmachiocen = 0;
	dma_chan_reg_write(channel, ACP_DMA_CNTL_0, dma_cntl.u32all);

	/* Program DMAChDscrStrIdx with the index
	 * of the first descriptor to be processed
	 */
	dma_chan_reg_write(channel, ACP_DMA_DSCR_STRT_IDX_0, dscr_strt_idx);
	/* Program DMAChDscrCnt with the number
	 * of descriptors to be processed in the transfer
	 */
	dma_chan_reg_write(channel, ACP_DMA_DSCR_CNT_0, dscr_cnt);
	/* set DMAChPrioLvl according to the priority */
	dma_chan_reg_write(channel, ACP_DMA_PRIO_0, 1);
	channel->status = COMP_STATE_PREPARE;
	return 0;
}

/* set the DMA channel configuration, source/target address, buffer sizes */
static int acp_dma_set_config(struct dma_chan_data *channel,
			      struct dma_sg_config *config)
{
	uint32_t dir;

	channel->direction = config->direction;
	dir = config->direction;
	if (config->cyclic) {
		tr_err(&acpdma_tr,
		       "DMA: cyclic configurations are not supported");
		return -EINVAL;
	}
	if (config->scatter) {
		tr_err(&acpdma_tr,
		       "DMA: scatter is not supported Chan.Index %d scatter %d",
		       channel->index, config->scatter);
		return -EINVAL;
	}
	return dma_setup(channel, &config->elem_array, dir);
}

static int acp_dma_pm_context_restore(struct dma *dma)
{
	/* TODO */
	return 0;
}

static int acp_dma_pm_context_store(struct dma *dma)
{
	/* TODO */
	return 0;
}

static int acp_dma_probe(struct dma *dma)
{
	struct acp_dma_chan_data *acp_dma_chan;
	int channel;

	if (dma->chan) {
		tr_err(&acpdma_tr, "DMA: already probed");
		return -EEXIST;
	}
	dma->chan = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM,
			    dma->plat_data.channels *
			    sizeof(struct dma_chan_data));
	if (!dma->chan) {
		tr_err(&acpdma_tr, "DMA: unable to allocate channel context");
		return -ENOMEM;
	}
	for (channel = 0; channel < dma->plat_data.channels; channel++) {
		dma->chan[channel].dma = dma;
		dma->chan[channel].index = channel;
		dma->chan[channel].status = COMP_STATE_INIT;
		acp_dma_chan = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0,
				       SOF_MEM_CAPS_RAM,
				       sizeof(struct acp_dma_chan_data));
		if (!acp_dma_chan) {
			rfree(dma->chan);
			tr_err(&acpdma_tr, "acp-dma: %d channel %d private data alloc failed",
			       dma->plat_data.id, channel);
			return -ENOMEM;
		}
		dma_chan_set_data(&dma->chan[channel], acp_dma_chan);
	}
	return 0;
}

static int acp_dma_remove(struct dma *dma)
{
	int channel;

	if (!dma->chan) {
		tr_err(&acpdma_tr, "DMA: Invalid remove call");
		return 0;
	}
	for (channel = 0; channel < dma->plat_data.channels; channel++)
		rfree(dma->chan[channel].priv_data);
	rfree(dma->chan);
	dma->chan = NULL;

	return 0;
}

static int acp_dma_interrupt(struct dma_chan_data *channel, enum dma_irq_cmd cmd)
{
	uint32_t status;

	if (channel->status == COMP_STATE_INIT)
		return 0;

	switch (cmd) {
	case DMA_IRQ_STATUS_GET:
		status = dma_reg_read(channel->dma, ACP_DSP0_INTR_STAT) & 0xFF;
		return status & (1 << channel->index);
	case DMA_IRQ_CLEAR:
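		/* the status register is assumed write-1-to-clear: write the
		 * channel bit back to acknowledge the pending interrupt
		 */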
		status = dma_reg_read(channel->dma, ACP_DSP0_INTR_STAT);
		status = status & (1 << channel->index);
		dma_reg_write(channel->dma, ACP_DSP0_INTR_STAT, status);
		return 0;
	case DMA_IRQ_MASK:
		status = dma_reg_read(channel->dma, ACP_DSP0_INTR_CNTL);
		status = status & (~(1 << channel->index));
		dma_reg_write(channel->dma, ACP_DSP0_INTR_CNTL, status);
		return 0;
	case DMA_IRQ_UNMASK:
		status = dma_reg_read(channel->dma, ACP_DSP0_INTR_CNTL);
		status = status | (1 << channel->index);
		dma_reg_write(channel->dma, ACP_DSP0_INTR_CNTL, status);
		return 0;
	default:
		return -EINVAL;
	}
}

static int acp_dma_get_attribute(struct dma *dma, uint32_t type, uint32_t *value)
{
	switch (type) {
	case DMA_ATTR_BUFFER_ALIGNMENT:
	case DMA_ATTR_COPY_ALIGNMENT:
		*value = ACP_DMA_BUFFER_ALIGN;
		break;
	case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT:
		*value = PLATFORM_DCACHE_ALIGN;
		break;
	case DMA_ATTR_BUFFER_PERIOD_COUNT:
		*value = ACP_DMA_BUFFER_PERIOD_COUNT;
		break;
	default:
		return -ENOENT; /* Attribute not found */
	}
	return 0;
}

static int acp_dma_get_data_size(struct dma_chan_data *channel,
				 uint32_t *avail, uint32_t *free)
{
	struct acp_dma_chan_data *acp_dma_chan = dma_chan_get_data(channel);
	uint32_t data_size;
	uint32_t tc; /* transfer count in bytes */

	tc = acp_dma_chan->config[channel->index].size;
	data_size = (int32_t)tc;
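	/* report half of the configured DMA buffer, i.e. one period of the
	 * double-buffered stream, as available (playback) or free (capture)
	 */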
	switch (channel->direction) {
	case DMA_DIR_MEM_TO_DEV:
	case DMA_DIR_HMEM_TO_LMEM:
		*avail = ABS(data_size) / 2;
		break;
	case DMA_DIR_DEV_TO_MEM:
	case DMA_DIR_LMEM_TO_HMEM:
		*free = ABS(data_size) / 2;
		break;
	default:
		tr_err(&acpdma_tr, "dma_get_data_size() Invalid direction %d",
		       channel->direction);
		return -EINVAL;
	}
	return 0;
}

const struct dma_ops acp_dma_ops = {
	.channel_get = acp_dma_channel_get,
	.channel_put = acp_dma_channel_put,
	.start = acp_dma_start,
	.stop = acp_dma_stop,
	.pause = acp_dma_pause,
	.release = acp_dma_release,
	.copy = acp_dma_copy,
	.status = acp_dma_status,
	.set_config = acp_dma_set_config,
	.pm_context_restore = acp_dma_pm_context_restore,
	.pm_context_store = acp_dma_pm_context_store,
	.probe = acp_dma_probe,
	.remove = acp_dma_remove,
	.interrupt = acp_dma_interrupt,
	.get_attribute = acp_dma_get_attribute,
	.get_data_size = acp_dma_get_data_size,
};