Lines Matching +full:dma +full:- +full:enabled

 * SPDX-License-Identifier: Apache-2.0
#include <zephyr/drivers/dma.h>
const struct device *dma; member
 * - DMIC DIVHFCLK is set to 0x0 (divide by 1) in dmic_mcux_get_osr()
 * - DMIC PHY_HALF is set to 0x0 (standard sample rate) in dmic_mcux_get_osr()
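These two register assumptions fix the clock tree, so the oversample ratio reduces to a division of the PDM bit clock by the PCM rate. Below is a minimal sketch of such an OSR helper; the use2fs_div handling and rounding are assumptions, not necessarily the driver's exact code.

/* Sketch only: derive the DMIC OSR from the PDM bit clock, assuming
 * DIVHFCLK = divide-by-1 and PHY_HALF = standard rate, per the comments
 * above. The 2FS divider mapping below is an assumption.
 */
static uint8_t dmic_osr_sketch(uint32_t pcm_rate, uint32_t bit_clk, bool use2fs)
{
	/* With 2FS output the effective divider between the PDM clock and
	 * the PCM rate changes; this mapping is an assumption.
	 */
	uint32_t use2fs_div = use2fs ? 1 : 2;

	return (uint8_t)(bit_clk / (2UL * pcm_rate * use2fs_div));
}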
dmic_parse_channel_map(drv_data->chan_map_lo, in dmic_mcux_hw_chan()
drv_data->chan_map_hi, in dmic_mcux_hw_chan()
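dmic_parse_channel_map() is the channel-map decode helper from zephyr/audio/dmic.h: it turns a logical channel into a PDM controller index plus a left/right slot. A hedged sketch of how the driver could map that onto a hardware channel index follows (the even/odd numbering is an assumption):

#include <zephyr/audio/dmic.h>

/* Sketch: resolve logical channel "chan" to a hardware channel index,
 * assuming two hardware channels per PDM controller (left = even,
 * right = odd).
 */
static uint8_t hw_chan_sketch(uint32_t map_lo, uint32_t map_hi, uint8_t chan)
{
	uint8_t pdm;
	enum pdm_lr lr;

	dmic_parse_channel_map(map_lo, map_hi, chan, &pdm, &lr);
	return (uint8_t)((pdm << 1) + ((lr == PDM_CHAN_RIGHT) ? 1 : 0));
}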
/* PDM channel 0 must always be enabled, as the RM states: in dmic_mcux_activate_channels()
for (uint8_t chan = 0; chan < drv_data->act_num_chan; chan++) { in dmic_mcux_activate_channels()
DMIC_EnableChannnel(drv_data->base_address, mask); in dmic_mcux_activate_channels()
drv_data->base_address->CHANEN &= ~mask; in dmic_mcux_activate_channels()
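The loop and the enable call suggest a mask-based activation: one bit per hardware channel, applied in a single write. A sketch under that assumption (dmic_mcux_hw_chan() resolving logical to hardware indices, and DMIC_EnableChannnel() keeping the triple-"n" spelling used above):

uint32_t mask = 0;

/* Accumulate one bit per active hardware channel, then enable them all
 * at once.
 */
for (uint8_t chan = 0; chan < drv_data->act_num_chan; chan++) {
	mask |= BIT(dmic_mcux_hw_chan(drv_data, chan));
}
DMIC_EnableChannnel(drv_data->base_address, mask);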
uint8_t num_chan = drv_data->act_num_chan; in dmic_mcux_enable_dma()
pdm_channel = drv_data->pdm_channels[hw_chan]; in dmic_mcux_enable_dma()
ret = dma_start(pdm_channel->dma, pdm_channel->dma_chan); in dmic_mcux_enable_dma()
LOG_ERR("Could not start DMA for HW channel %d", in dmic_mcux_enable_dma()
if (dma_stop(pdm_channel->dma, pdm_channel->dma_chan)) { in dmic_mcux_enable_dma()
ret = -EIO; in dmic_mcux_enable_dma()
DMIC_EnableChannelDma(drv_data->base_address, in dmic_mcux_enable_dma()
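These fragments show a start-with-rollback pattern: if DMA fails to start on any channel, the channels already started are stopped before returning. A condensed sketch; the struct name mcux_dmic_pdm_chan is hypothetical, inferred from the pdm_channels array above:

/* Sketch: start DMA for every active channel, rolling back on failure */
static int start_all_or_rollback(struct mcux_dmic_drv_data *drv_data,
				 uint8_t num_chan)
{
	for (uint8_t chan = 0; chan < num_chan; chan++) {
		uint8_t hw_chan = dmic_mcux_hw_chan(drv_data, chan);
		struct mcux_dmic_pdm_chan *pdm_channel =
			drv_data->pdm_channels[hw_chan];
		int ret = dma_start(pdm_channel->dma, pdm_channel->dma_chan);

		if (ret < 0) {
			LOG_ERR("Could not start DMA for HW channel %d", hw_chan);
			/* Stop the channels that already started */
			while (chan-- > 0) {
				hw_chan = dmic_mcux_hw_chan(drv_data, chan);
				pdm_channel = drv_data->pdm_channels[hw_chan];
				(void)dma_stop(pdm_channel->dma, pdm_channel->dma_chan);
			}
			return -EIO;
		}
	}
	return 0;
}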
/* Helper to reload the DMA engine for all active channels with a new buffer */
uint8_t num_chan = drv_data->act_num_chan; in dmic_mcux_reload_dma()
uint32_t dma_buf_size = drv_data->block_size / num_chan; in dmic_mcux_reload_dma()
/* This function reloads the DMA engine for all active DMA channels in dmic_mcux_reload_dma()
 * with the provided buffer. Each DMA channel will start in dmic_mcux_reload_dma()
pdm_channel = drv_data->pdm_channels[hw_chan]; in dmic_mcux_reload_dma()
src = DMIC_FifoGetAddress(drv_data->base_address, hw_chan); in dmic_mcux_reload_dma()
ret = dma_reload(pdm_channel->dma, pdm_channel->dma_chan, in dmic_mcux_reload_dma()
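dma_reload() takes the device, channel, source address, destination address, and size. Here the source is each hardware channel's FIFO and the destination is that channel's slice of the new block; the offset arithmetic is an assumption based on the dma_buf_size division above. A fragment-style sketch reusing the locals shown (buffer being the newly provided block):

for (uint8_t chan = 0; chan < num_chan; chan++) {
	uint8_t hw_chan = dmic_mcux_hw_chan(drv_data, chan);

	pdm_channel = drv_data->pdm_channels[hw_chan];
	src = DMIC_FifoGetAddress(drv_data->base_address, hw_chan);
	/* Each channel deinterleaves into its own slice of the block */
	dst = (uint32_t)((uint8_t *)buffer + (chan * dma_buf_size));

	ret = dma_reload(pdm_channel->dma, pdm_channel->dma_chan,
			 src, dst, dma_buf_size);
	if (ret < 0) {
		return ret;
	}
}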
/* Helper to get next buffer index for DMA */
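Given how the callback below advances active_buf_idx with this helper, a plausible implementation is a simple wrap at the configured ring size:

/* Advance to the next DMA buffer index, wrapping at the ring size */
static uint8_t dmic_mcux_next_buf_idx(uint8_t current_idx)
{
	if ((current_idx + 1) == CONFIG_DMIC_MCUX_DMA_BUFFERS) {
		return 0;
	}
	return current_idx + 1;
}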
/* Disable DMA */ in dmic_mcux_stop()
k_mem_slab_free(drv_data->mem_slab, drv_data->dma_bufs[i]); in dmic_mcux_stop()
k_msgq_purge(drv_data->rx_queue); in dmic_mcux_stop()
drv_data->dmic_state = DMIC_STATE_CONFIGURED; in dmic_mcux_stop()
void *done_buffer = drv_data->dma_bufs[drv_data->active_buf_idx]; in dmic_mcux_dma_cb()
/* DMA has failed, free allocated blocks */ in dmic_mcux_dma_cb()
LOG_ERR("DMA reports error"); in dmic_mcux_dma_cb()
/* Free all allocated DMA buffers */ in dmic_mcux_dma_cb()
drv_data->dmic_state = DMIC_STATE_ERROR; in dmic_mcux_dma_cb()
ret = k_mem_slab_alloc(drv_data->mem_slab, &new_buffer, K_NO_WAIT); in dmic_mcux_dma_cb()
* rx queue (or the DMA would starve). Therefore, we just in dmic_mcux_dma_cb()
* by the DMA. in dmic_mcux_dma_cb()
drv_data->dmic_state = DMIC_STATE_ERROR; in dmic_mcux_dma_cb()
/* Reload DMA */ in dmic_mcux_dma_cb()
drv_data->active_buf_idx = in dmic_mcux_dma_cb()
dmic_mcux_next_buf_idx(drv_data->active_buf_idx); in dmic_mcux_dma_cb()
/* DMA issues an interrupt at the completion of every block. in dmic_mcux_dma_cb()
ret = k_msgq_put(drv_data->rx_queue, &done_buffer, K_NO_WAIT); in dmic_mcux_dma_cb()
k_mem_slab_free(drv_data->mem_slab, new_buffer); in dmic_mcux_dma_cb()
* in place to be overwritten by the DMA in dmic_mcux_dma_cb()
drv_data->dmic_state = DMIC_STATE_ERROR; in dmic_mcux_dma_cb()
/* Reload DMA */ in dmic_mcux_dma_cb()
drv_data->active_buf_idx = in dmic_mcux_dma_cb()
dmic_mcux_next_buf_idx(drv_data->active_buf_idx); in dmic_mcux_dma_cb()
* Replace pointer to previous buffer in our dma slots array, in dmic_mcux_dma_cb()
* and reload DMA with next buffer. in dmic_mcux_dma_cb()
drv_data->dma_bufs[drv_data->active_buf_idx] = new_buffer; in dmic_mcux_dma_cb()
drv_data->active_buf_idx = dmic_mcux_next_buf_idx(drv_data->active_buf_idx); in dmic_mcux_dma_cb()
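Assembled, the callback follows Zephyr's dma_callback_t signature (device, user data, channel, status). The condensed sketch below assumes user_data carries the driver data pointer, and omits the DMA stop/cleanup and reload details shown above:

static void dmic_mcux_dma_cb_sketch(const struct device *dma_dev,
				    void *user_data, uint32_t channel,
				    int status)
{
	struct mcux_dmic_drv_data *drv_data = user_data;
	void *done_buffer = drv_data->dma_bufs[drv_data->active_buf_idx];
	void *new_buffer;

	ARG_UNUSED(dma_dev);
	ARG_UNUSED(channel);

	if (status < 0) {
		/* DMA reports a hard error; record it and bail out */
		drv_data->dmic_state = DMIC_STATE_ERROR;
		return;
	}
	if (k_mem_slab_alloc(drv_data->mem_slab, &new_buffer, K_NO_WAIT) < 0) {
		/* No free block: leave the old one in place to be
		 * overwritten by the DMA and flag the overrun.
		 */
		drv_data->dmic_state = DMIC_STATE_ERROR;
	} else if (k_msgq_put(drv_data->rx_queue, &done_buffer, K_NO_WAIT) < 0) {
		/* Consumer is not keeping up: return the fresh block and
		 * let the DMA overwrite the completed one.
		 */
		k_mem_slab_free(drv_data->mem_slab, new_buffer);
		drv_data->dmic_state = DMIC_STATE_ERROR;
	} else {
		/* Hand the completed block to the application and slot the
		 * fresh block into the ring in its place.
		 */
		drv_data->dma_bufs[drv_data->active_buf_idx] = new_buffer;
	}
	drv_data->active_buf_idx =
		dmic_mcux_next_buf_idx(drv_data->active_buf_idx);
}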
struct mcux_dmic_drv_data *drv_data = dev->data; in dmic_mcux_setup_dma()
uint8_t num_chan = drv_data->act_num_chan; in dmic_mcux_setup_dma()
uint32_t dma_buf_size = drv_data->block_size / num_chan; in dmic_mcux_setup_dma()
void *dma_buf = drv_data->dma_bufs[dma_buf_idx]; in dmic_mcux_setup_dma()
/* Set up DMA configuration common to all channels */ in dmic_mcux_setup_dma()
/* When multiple channels are enabled simultaneously, the DMA in dmic_mcux_setup_dma()
 * completion interrupt from one channel will signal that DMA data in dmic_mcux_setup_dma()
 * DMA completion callback for the first channel we set up in dmic_mcux_setup_dma()
DMIC_FifoGetAddress(drv_data->base_address, hw_chan); in dmic_mcux_setup_dma()
 * the resulting DMA buffer would look like: in dmic_mcux_setup_dma()
/* Enable circular mode - when the final DMA block in dmic_mcux_setup_dma()
 * is exhausted, we want the DMA controller in dmic_mcux_setup_dma()
if (blk < (CONFIG_DMIC_MCUX_DMA_BUFFERS - 1)) { in dmic_mcux_setup_dma()
/* Select next DMA buffer in array */ in dmic_mcux_setup_dma()
dma_buf = drv_data->dma_bufs[dma_buf_idx]; in dmic_mcux_setup_dma()
pdm_channel = drv_data->pdm_channels[hw_chan]; in dmic_mcux_setup_dma()
ret = dma_config(pdm_channel->dma, pdm_channel->dma_chan, &dma_cfg); in dmic_mcux_setup_dma()
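A sketch of what the per-channel configuration could look like, chaining CONFIG_DMIC_MCUX_DMA_BUFFERS block descriptors into a ring via next_block. The 16-bit transfer width follows the driver's PCM width restriction; the other field values are assumptions, and a real multi-channel setup would offset each channel's dest_address into the block:

/* Sketch only: per-channel DMA configuration with the descriptors
 * chained into a ring.
 */
static struct dma_block_config blk_cfg[CONFIG_DMIC_MCUX_DMA_BUFFERS];

struct dma_config dma_cfg = {
	.channel_direction = PERIPHERAL_TO_MEMORY,
	.source_data_size = sizeof(uint16_t),
	.dest_data_size = sizeof(uint16_t),
	.block_count = CONFIG_DMIC_MCUX_DMA_BUFFERS,
	.head_block = &blk_cfg[0],
	.dma_callback = dmic_mcux_dma_cb,
	.user_data = drv_data,
};

for (int blk = 0; blk < CONFIG_DMIC_MCUX_DMA_BUFFERS; blk++) {
	blk_cfg[blk].source_address =
		DMIC_FifoGetAddress(drv_data->base_address, hw_chan);
	blk_cfg[blk].source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	blk_cfg[blk].dest_address = (uint32_t)drv_data->dma_bufs[blk];
	blk_cfg[blk].dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
	blk_cfg[blk].block_size = dma_buf_size;
	/* Chain the final descriptor back to the first so the transfer
	 * is circular.
	 */
	blk_cfg[blk].next_block = (blk < (CONFIG_DMIC_MCUX_DMA_BUFFERS - 1))
		? &blk_cfg[blk + 1] : &blk_cfg[0];
}
ret = dma_config(pdm_channel->dma, pdm_channel->dma_chan, &dma_cfg);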
struct mcux_dmic_drv_data *drv_data = dev->data; in dmic_mcux_init_channel()
if (!drv_data->pdm_channels[chan]) { in dmic_mcux_init_channel()
return -EINVAL; in dmic_mcux_init_channel()
drv_data->pdm_channels[chan]->dmic_channel_cfg.osr = osr; in dmic_mcux_init_channel()
DMIC_ConfigChannel(drv_data->base_address, (dmic_channel_t)chan, in dmic_mcux_init_channel()
&drv_data->pdm_channels[chan]->dmic_channel_cfg); in dmic_mcux_init_channel()
 * DMA too frequently in dmic_mcux_init_channel()
DMIC_FifoChannel(drv_data->base_address, chan, 15, true, true); in dmic_mcux_init_channel()
/* Disable interrupts. DMA will be enabled in dmic_mcux_trigger. */ in dmic_mcux_init_channel()
DMIC_EnableChannelInterrupt(drv_data->base_address, chan, false); in dmic_mcux_init_channel()
const struct mcux_dmic_cfg *config = dev->config; in mcux_dmic_init()
struct mcux_dmic_drv_data *drv_data = dev->data; in mcux_dmic_init()
ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); in mcux_dmic_init()
DMIC_Init(drv_data->base_address); in mcux_dmic_init()
DMIC_Use2fs(drv_data->base_address, config->use2fs); in mcux_dmic_init()
DMIC_SetIOCFG(drv_data->base_address, kDMIC_PdmDual); in mcux_dmic_init()
drv_data->dmic_state = DMIC_STATE_INITIALIZED; in mcux_dmic_init()
const struct mcux_dmic_cfg *drv_config = dev->config; in dmic_mcux_configure()
struct mcux_dmic_drv_data *drv_data = dev->data; in dmic_mcux_configure()
struct pdm_chan_cfg *channel = &config->channel; in dmic_mcux_configure()
struct pcm_stream_cfg *stream = &config->streams[0]; in dmic_mcux_configure()
if (drv_data->dmic_state == DMIC_STATE_ACTIVE) { in dmic_mcux_configure()
return -EBUSY; in dmic_mcux_configure()
if (channel->req_num_streams != 1) { in dmic_mcux_configure()
return -EINVAL; in dmic_mcux_configure()
if (channel->req_num_chan > FSL_FEATURE_DMIC_CHANNEL_NUM) { in dmic_mcux_configure()
return -ENOTSUP; in dmic_mcux_configure()
if (stream->pcm_rate == 0 || stream->pcm_width == 0) { in dmic_mcux_configure()
if (drv_data->dmic_state == DMIC_STATE_CONFIGURED) { in dmic_mcux_configure()
DMIC_DeInit(drv_data->base_address); in dmic_mcux_configure()
drv_data->dmic_state = DMIC_STATE_UNINIT; in dmic_mcux_configure()
if (drv_data->dmic_state == DMIC_STATE_UNINIT) { in dmic_mcux_configure()
 * "destination address increment" function of the LPC DMA IP. Since in dmic_mcux_configure()
 * the LPC DMA IP does not support 3 byte wide transfers, we cannot in dmic_mcux_configure()
if (stream->pcm_width != 16) { in dmic_mcux_configure()
return -ENOTSUP; in dmic_mcux_configure()
ret = clock_control_get_rate(drv_config->clock_dev, in dmic_mcux_configure()
drv_config->clock_name, &bit_clk_rate); in dmic_mcux_configure()
if ((config->io.min_pdm_clk_freq > bit_clk_rate) || in dmic_mcux_configure()
(config->io.max_pdm_clk_freq < bit_clk_rate)) { in dmic_mcux_configure()
return -EINVAL; in dmic_mcux_configure()
osr = dmic_mcux_get_osr(stream->pcm_rate, bit_clk_rate, drv_config->use2fs); in dmic_mcux_configure()
channel->act_num_chan = 0; in dmic_mcux_configure()
drv_data->chan_map_lo = channel->req_chan_map_lo; in dmic_mcux_configure()
drv_data->chan_map_hi = channel->req_chan_map_hi; in dmic_mcux_configure()
for (uint8_t chan = 0; chan < channel->req_num_chan; chan += 2) { in dmic_mcux_configure()
dmic_parse_channel_map(channel->req_chan_map_lo, in dmic_mcux_configure()
channel->req_chan_map_hi, in dmic_mcux_configure()
if ((chan + 1) < channel->req_num_chan) { in dmic_mcux_configure()
/* Paired channel is enabled */ in dmic_mcux_configure()
dmic_parse_channel_map(channel->req_chan_map_lo, in dmic_mcux_configure()
channel->req_chan_map_hi, in dmic_mcux_configure()
return -EINVAL; in dmic_mcux_configure()
channel->act_num_chan++; in dmic_mcux_configure()
if ((chan + 1) < channel->req_num_chan) { in dmic_mcux_configure()
/* Paired channel is enabled */ in dmic_mcux_configure()
channel->act_num_chan++; in dmic_mcux_configure()
channel->act_chan_map_lo = channel->req_chan_map_lo; in dmic_mcux_configure()
channel->act_chan_map_hi = channel->req_chan_map_hi; in dmic_mcux_configure()
drv_data->mem_slab = stream->mem_slab; in dmic_mcux_configure()
drv_data->block_size = stream->block_size; in dmic_mcux_configure()
drv_data->act_num_chan = channel->act_num_chan; in dmic_mcux_configure()
drv_data->dmic_state = DMIC_STATE_CONFIGURED; in dmic_mcux_configure()
struct mcux_dmic_drv_data *drv_data = dev->data; in dmic_mcux_start()
 * We queue buffers so that when the DMA is operating on buffer "n", in dmic_mcux_start()
 * buffer "n+1" is already queued in the DMA hardware. When buffer "n" in dmic_mcux_start()
 * DMA descriptor chain. This approach requires the driver to allocate in dmic_mcux_start()
/* Allocate buffers for DMA */ in dmic_mcux_start()
ret = k_mem_slab_alloc(drv_data->mem_slab, in dmic_mcux_start()
&drv_data->dma_bufs[i], K_NO_WAIT); in dmic_mcux_start()
return -ENOBUFS; in dmic_mcux_start()
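A sketch of that pre-allocation loop with rollback on failure; the rollback is an assumption, since the fragments only show the allocation call and the -ENOBUFS return:

for (int i = 0; i < CONFIG_DMIC_MCUX_DMA_BUFFERS; i++) {
	/* Allocate buffers for DMA */
	ret = k_mem_slab_alloc(drv_data->mem_slab,
			       &drv_data->dma_bufs[i], K_NO_WAIT);
	if (ret < 0) {
		LOG_ERR("Could not allocate DMA buffer %d", i);
		/* Return the buffers allocated before the failure */
		while (i-- > 0) {
			k_mem_slab_free(drv_data->mem_slab, drv_data->dma_bufs[i]);
		}
		return -ENOBUFS;
	}
}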
struct mcux_dmic_drv_data *drv_data = dev->data; in dmic_mcux_trigger()
if (drv_data->dmic_state == DMIC_STATE_ACTIVE) { in dmic_mcux_trigger()
drv_data->dmic_state = DMIC_STATE_PAUSED; in dmic_mcux_trigger()
if (drv_data->dmic_state == DMIC_STATE_ACTIVE) { in dmic_mcux_trigger()
drv_data->dmic_state = DMIC_STATE_CONFIGURED; in dmic_mcux_trigger()
if (drv_data->dmic_state == DMIC_STATE_PAUSED) { in dmic_mcux_trigger()
drv_data->dmic_state = DMIC_STATE_ACTIVE; in dmic_mcux_trigger()
if ((drv_data->dmic_state != DMIC_STATE_CONFIGURED) && in dmic_mcux_trigger()
(drv_data->dmic_state != DMIC_STATE_ACTIVE)) { in dmic_mcux_trigger()
return -EIO; in dmic_mcux_trigger()
} else if (drv_data->dmic_state != DMIC_STATE_ACTIVE) { in dmic_mcux_trigger()
return -EIO; in dmic_mcux_trigger()
drv_data->dmic_state = DMIC_STATE_ACTIVE; in dmic_mcux_trigger()
DMIC_DeInit(drv_data->base_address); in dmic_mcux_trigger()
drv_data->dmic_state = DMIC_STATE_UNINIT; in dmic_mcux_trigger()
return -EINVAL; in dmic_mcux_trigger()
struct mcux_dmic_drv_data *drv_data = dev->data; in dmic_mcux_read()
if (drv_data->dmic_state == DMIC_STATE_ERROR) { in dmic_mcux_read()
return -EIO; in dmic_mcux_read()
if ((drv_data->dmic_state != DMIC_STATE_CONFIGURED) && in dmic_mcux_read()
(drv_data->dmic_state != DMIC_STATE_ACTIVE) && in dmic_mcux_read()
(drv_data->dmic_state != DMIC_STATE_PAUSED)) { in dmic_mcux_read()
return -EIO; in dmic_mcux_read()
ret = k_msgq_get(drv_data->rx_queue, buffer, SYS_TIMEOUT_MS(timeout)); in dmic_mcux_read()
*size = drv_data->block_size; in dmic_mcux_read()
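From the application side this driver is exercised through the standard Zephyr DMIC API (dmic_configure(), dmic_trigger(), dmic_read()). A minimal usage sketch; the rate, block sizing, and clock limits are illustrative values only:

#include <zephyr/kernel.h>
#include <zephyr/audio/dmic.h>

#define SAMPLE_RATE 16000
#define BLOCK_SIZE  3200 /* illustrative: 100 ms of 16-bit mono */
#define BLOCK_COUNT 4

K_MEM_SLAB_DEFINE(rx_slab, BLOCK_SIZE, BLOCK_COUNT, 4);

static int capture_one_block(const struct device *dmic_dev)
{
	struct pcm_stream_cfg stream = {
		.pcm_rate = SAMPLE_RATE,
		.pcm_width = 16, /* the only width this driver accepts */
		.block_size = BLOCK_SIZE,
		.mem_slab = &rx_slab,
	};
	struct dmic_cfg cfg = {
		.io = {
			.min_pdm_clk_freq = 1000000, /* illustrative limits */
			.max_pdm_clk_freq = 3500000,
		},
		.streams = &stream,
		.channel = {
			.req_num_streams = 1,
			.req_num_chan = 1,
		},
	};
	void *buffer;
	size_t size;
	int ret;

	/* Map logical channel 0 to the left slot of PDM controller 0 */
	cfg.channel.req_chan_map_lo = dmic_build_channel_map(0, 0, PDM_CHAN_LEFT);

	ret = dmic_configure(dmic_dev, &cfg);
	if (ret < 0) {
		return ret;
	}
	ret = dmic_trigger(dmic_dev, DMIC_TRIGGER_START);
	if (ret < 0) {
		return ret;
	}
	ret = dmic_read(dmic_dev, 0, &buffer, &size, 1000);
	if (ret == 0) {
		/* ... process "size" bytes, then return the block */
		k_mem_slab_free(&rx_slab, buffer);
	}
	return dmic_trigger(dmic_dev, DMIC_TRIGGER_STOP);
}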
(val >= 0) ? (val & 0xF) : (BIT(4) | (0x10 - (val & 0xF)))
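This expression encodes a signed gain-shift value as sign-plus-magnitude: non-negative values pass through in bits 3:0, while for val in [-15, -1] the low nibble (val & 0xF) equals 16 + val, so 0x10 - (val & 0xF) recovers the magnitude -val and BIT(4) supplies the sign. For example, 3 encodes as 0x03 and -3 as 0x13.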
.dma = DEVICE_DT_GET(DT_DMAS_CTLR(pdm_node)), \
/* Defines structures for all enabled PDM channels */