/*
 * Copyright 2023 NXP
 * Copyright (c) 2021 Nordic Semiconductor ASA
 *
 * based on dmic_nrfx_pdm.c
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/drivers/dma.h>
#include <zephyr/audio/dmic.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/drivers/clock_control.h>
#include <soc.h>

#include <fsl_dmic.h>

#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(dmic_mcux, CONFIG_AUDIO_DMIC_LOG_LEVEL);

#define DT_DRV_COMPAT nxp_dmic

struct mcux_dmic_pdm_chan {
	dmic_channel_config_t dmic_channel_cfg;
	const struct device *dma;
	uint8_t dma_chan;
};

struct mcux_dmic_drv_data {
	struct k_mem_slab *mem_slab;
	void *dma_bufs[CONFIG_DMIC_MCUX_DMA_BUFFERS];
	uint8_t active_buf_idx;
	uint32_t block_size;
	DMIC_Type *base_address;
	struct mcux_dmic_pdm_chan **pdm_channels;
	uint8_t act_num_chan;
	struct k_msgq *rx_queue;
	uint32_t chan_map_lo;
	uint32_t chan_map_hi;
	enum dmic_state dmic_state;
};

struct mcux_dmic_cfg {
	const struct pinctrl_dev_config *pcfg;
	const struct device *clock_dev;
	clock_control_subsys_t clock_name;
	bool use2fs;
};

static int dmic_mcux_get_osr(uint32_t pcm_rate, uint32_t bit_clk, bool use_2fs)
{
	uint32_t use2fs_div = use_2fs ? 1 : 2;

	/* Note that the below calculation assumes the following:
	 * - DMIC DIVHFCLK is set to 0x0 (divide by 1)
	 * - DMIC PHY_HALF is set to 0x0 (standard sample rate)
	 */
	return (uint32_t)(bit_clk / (2 * pcm_rate * use2fs_div));
}
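
/* Worked example of the calculation above (hypothetical clock numbers):
 * with a 3.072 MHz PDM bit clock, a 16 kHz PCM rate and use2fs enabled
 * (use2fs_div = 1), the resulting OSR is 3072000 / (2 * 16000 * 1) = 96.
 * With use2fs disabled, the same clocks would yield an OSR of 48.
 */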

/* Gets hardware channel index from logical channel */
static uint8_t dmic_mcux_hw_chan(struct mcux_dmic_drv_data *drv_data,
				 uint8_t log_chan)
{
	enum pdm_lr lr;
	uint8_t hw_chan;

	/* This function assigns hardware channel "n" to the left channel,
	 * and hardware channel "n+1" to the right channel. This choice is
	 * arbitrary, but must be followed throughout the driver.
	 */
	dmic_parse_channel_map(drv_data->chan_map_lo,
			       drv_data->chan_map_hi,
			       log_chan, &hw_chan, &lr);
	if (lr == PDM_CHAN_LEFT) {
		return hw_chan * 2;
	} else {
		return (hw_chan * 2) + 1;
	}
}
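
/* Example of the mapping above: a logical channel mapped to the left side
 * of PDM controller 1 resolves to hardware channel 2, while the right side
 * of the same controller resolves to hardware channel 3.
 */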

static void dmic_mcux_activate_channels(struct mcux_dmic_drv_data *drv_data,
					bool enable)
{
	/* PDM channel 0 must always be enabled, as the RM states:
	 * "In order to output 8 channels of PDM Data, PDM_CLK01 must be used"
	 * therefore, even if we don't intend to capture PDM data from the
	 * channel 0 FIFO, we still enable the channel so the clock is active.
	 */
	uint32_t mask = 0x1;

	for (uint8_t chan = 0; chan < drv_data->act_num_chan; chan++) {
		/* Set bitmask of hw channel to enable */
		mask |= BIT(dmic_mcux_hw_chan(drv_data, chan));
	}

	if (enable) {
		/* Note: the triple "n" matches the MCUX SDK function name */
		DMIC_EnableChannnel(drv_data->base_address, mask);
	} else {
		/* No function to disable channels, we must bypass HAL here */
		drv_data->base_address->CHANEN &= ~mask;
	}
}
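
/* Illustration of the mask computed above: with two active logical channels
 * mapped to the left and right sides of PDM controller 1 (hardware channels
 * 2 and 3), mask = 0x1 | BIT(2) | BIT(3) = 0x0D, where channel 0 remains
 * enabled purely to keep PDM_CLK01 running.
 */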

static int dmic_mcux_enable_dma(struct mcux_dmic_drv_data *drv_data, bool enable)
{
	struct mcux_dmic_pdm_chan *pdm_channel;
	uint8_t num_chan = drv_data->act_num_chan;
	uint8_t hw_chan;
	int ret = 0;

	for (uint8_t chan = 0; chan < num_chan; chan++) {
		/* Parse the channel map data */
		hw_chan = dmic_mcux_hw_chan(drv_data, chan);
		pdm_channel = drv_data->pdm_channels[hw_chan];
		if (enable) {
			ret = dma_start(pdm_channel->dma, pdm_channel->dma_chan);
			if (ret < 0) {
				LOG_ERR("Could not start DMA for HW channel %d",
					hw_chan);
				return ret;
			}
		} else {
			if (dma_stop(pdm_channel->dma, pdm_channel->dma_chan)) {
				ret = -EIO;
			}
		}
		DMIC_EnableChannelDma(drv_data->base_address,
				      (dmic_channel_t)hw_chan, enable);
	}

	return ret;
}

/* Helper to reload DMA engine for all active channels with new buffer */
static void dmic_mcux_reload_dma(struct mcux_dmic_drv_data *drv_data,
				 void *buffer)
{
	int ret;
	uint8_t hw_chan;
	struct mcux_dmic_pdm_chan *pdm_channel;
	uint8_t num_chan = drv_data->act_num_chan;
	uint32_t dma_buf_size = drv_data->block_size / num_chan;
	uint32_t src, dst;

	/* This function reloads the DMA engine for all active DMA channels
	 * with the provided buffer. Each DMA channel will start
	 * at a different initial address to interleave channel data.
	 */
	for (uint8_t chan = 0; chan < num_chan; chan++) {
		/* Parse the channel map data */
		hw_chan = dmic_mcux_hw_chan(drv_data, chan);
		pdm_channel = drv_data->pdm_channels[hw_chan];
		src = DMIC_FifoGetAddress(drv_data->base_address, hw_chan);
		dst = (uint32_t)(((uint16_t *)buffer) + chan);
		ret = dma_reload(pdm_channel->dma, pdm_channel->dma_chan,
				 src, dst, dma_buf_size);
		if (ret < 0) {
			LOG_ERR("Could not reload DMIC HW channel %d", hw_chan);
			return;
		}
	}
}

/* Helper to get next buffer index for DMA */
static uint8_t dmic_mcux_next_buf_idx(uint8_t current_idx)
{
	if ((current_idx + 1) == CONFIG_DMIC_MCUX_DMA_BUFFERS) {
		return 0;
	}
	return current_idx + 1;
}

static int dmic_mcux_stop(struct mcux_dmic_drv_data *drv_data)
{
	/* Disable active channels */
	dmic_mcux_activate_channels(drv_data, false);
	/* Disable DMA */
	dmic_mcux_enable_dma(drv_data, false);

	/* Free all DMA buffers back to the memory slab */
	for (uint32_t i = 0; i < CONFIG_DMIC_MCUX_DMA_BUFFERS; i++) {
		k_mem_slab_free(drv_data->mem_slab, drv_data->dma_bufs[i]);
	}

	/* Purge the RX queue as well. */
	k_msgq_purge(drv_data->rx_queue);

	drv_data->dmic_state = DMIC_STATE_CONFIGURED;

	return 0;
}

static void dmic_mcux_dma_cb(const struct device *dev, void *user_data,
			     uint32_t channel, int status)
{
	struct mcux_dmic_drv_data *drv_data = (struct mcux_dmic_drv_data *)user_data;
	int ret;
	void *done_buffer = drv_data->dma_bufs[drv_data->active_buf_idx];
	void *new_buffer;

	LOG_DBG("CB: channel is %u", channel);

	if (status < 0) {
		/* DMA has failed, free allocated blocks */
		LOG_ERR("DMA reports error");
		dmic_mcux_enable_dma(drv_data, false);
		dmic_mcux_activate_channels(drv_data, false);
		/* Free all allocated DMA buffers */
		dmic_mcux_stop(drv_data);
		drv_data->dmic_state = DMIC_STATE_ERROR;
		return;
	}

	/* Before we queue the current buffer, make sure we can allocate
	 * another one to replace it.
	 */
	ret = k_mem_slab_alloc(drv_data->mem_slab, &new_buffer, K_NO_WAIT);
	if (ret < 0) {
		/* We can't allocate a new buffer to replace the current
		 * one, so we cannot release the current buffer to the
		 * rx queue (or the DMA would starve). Therefore, we just
		 * leave the current buffer in place to be overwritten
		 * by the DMA.
		 */
		LOG_ERR("Could not allocate RX buffer. Dropping RX data");
		drv_data->dmic_state = DMIC_STATE_ERROR;
		/* Reload DMA */
		dmic_mcux_reload_dma(drv_data, done_buffer);
		/* Advance active buffer index */
		drv_data->active_buf_idx =
			dmic_mcux_next_buf_idx(drv_data->active_buf_idx);
		return;
	}

	/* DMA issues an interrupt at the completion of every block.
	 * We should put the active buffer into the rx queue for the
	 * application to read. The application is responsible for
	 * freeing this buffer once it processes it.
	 */
	ret = k_msgq_put(drv_data->rx_queue, &done_buffer, K_NO_WAIT);
	if (ret < 0) {
		/* Free the newly allocated buffer, we won't need it. */
		k_mem_slab_free(drv_data->mem_slab, new_buffer);
		/* We cannot enqueue the current buffer, so we will drop
		 * the current buffer data and leave the current buffer
		 * in place to be overwritten by the DMA.
		 */
		LOG_ERR("RX queue overflow, dropping RX buffer data");
		drv_data->dmic_state = DMIC_STATE_ERROR;
		/* Reload DMA */
		dmic_mcux_reload_dma(drv_data, done_buffer);
		/* Advance active buffer index */
		drv_data->active_buf_idx =
			dmic_mcux_next_buf_idx(drv_data->active_buf_idx);
		return;
	}

	/* Previous buffer was enqueued, and new buffer is allocated.
	 * Replace pointer to previous buffer in our dma slots array,
	 * and reload DMA with next buffer.
	 */
	drv_data->dma_bufs[drv_data->active_buf_idx] = new_buffer;
	dmic_mcux_reload_dma(drv_data, new_buffer);
	/* Advance active buffer index */
	drv_data->active_buf_idx = dmic_mcux_next_buf_idx(drv_data->active_buf_idx);
}
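
/* Illustration of the buffer rotation above, assuming
 * CONFIG_DMIC_MCUX_DMA_BUFFERS = 3: while the DMA fills buffer 1,
 * buffer 0 has just been handed to the RX queue and replaced by a freshly
 * allocated block, and buffer 2 is already queued in the descriptor chain.
 * active_buf_idx simply walks 0 -> 1 -> 2 -> 0 -> ...
 */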

static int dmic_mcux_setup_dma(const struct device *dev)
{
	struct mcux_dmic_drv_data *drv_data = dev->data;
	struct mcux_dmic_pdm_chan *pdm_channel;
	struct dma_block_config blk_cfg[CONFIG_DMIC_MCUX_DMA_BUFFERS] = {0};
	struct dma_config dma_cfg = {0};
	uint8_t num_chan = drv_data->act_num_chan;
	uint32_t dma_buf_size = drv_data->block_size / num_chan;
	uint8_t dma_buf_idx = 0;
	void *dma_buf = drv_data->dma_bufs[dma_buf_idx];
	uint8_t hw_chan;
	int ret = 0;

	/* Set up the DMA configuration common to all channels */
	dma_cfg.user_data = drv_data;
	dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
	dma_cfg.source_data_size = sizeof(uint16_t); /* Each sample is 16 bits */
	dma_cfg.dest_data_size = sizeof(uint16_t);
	dma_cfg.block_count = CONFIG_DMIC_MCUX_DMA_BUFFERS;
	dma_cfg.head_block = &blk_cfg[0];
	dma_cfg.complete_callback_en = 1; /* Callback at each block */
	dma_cfg.dma_callback = dmic_mcux_dma_cb;

	/* When multiple channels are enabled simultaneously, the DMA
	 * completion interrupt from one channel will signal that DMA data
	 * from multiple channels may be collected, provided the same
	 * amount of data was transferred. Therefore, we only enable the
	 * DMA completion callback for the first channel we set up.
	 */
	for (uint8_t chan = 0; chan < num_chan; chan++) {
		/* Parse the channel map data */
		hw_chan = dmic_mcux_hw_chan(drv_data, chan);
		/* Configure blocks for hw_chan */
		for (uint32_t blk = 0; blk < CONFIG_DMIC_MCUX_DMA_BUFFERS; blk++) {
			blk_cfg[blk].source_address =
				DMIC_FifoGetAddress(drv_data->base_address, hw_chan);
			/* We interleave samples within the output buffer
			 * based on channel map. So for a channel map like so:
			 * [pdm0_l, pdm0_r, pdm1_r, pdm1_l]
			 * the resulting DMA buffer would look like:
			 * [pdm0_l_s0, pdm0_r_s0, pdm1_r_s0, pdm1_l_s0,
			 *  pdm0_l_s1, pdm0_r_s1, pdm1_r_s1, pdm1_l_s1, ...]
			 * Each sample is 16 bits wide.
			 */
			blk_cfg[blk].dest_address =
				(uint32_t)(((uint16_t *)dma_buf) + chan);
			blk_cfg[blk].dest_scatter_interval =
				num_chan * sizeof(uint16_t);
			blk_cfg[blk].dest_scatter_en = 1;
			blk_cfg[blk].source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			blk_cfg[blk].dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
			blk_cfg[blk].block_size = dma_buf_size;
			/* Enable circular mode: when the final DMA block
			 * is exhausted, we want the DMA controller
			 * to restart with the first one.
			 */
			blk_cfg[blk].source_reload_en = 1;
			blk_cfg[blk].dest_reload_en = 1;
			if (blk < (CONFIG_DMIC_MCUX_DMA_BUFFERS - 1)) {
				blk_cfg[blk].next_block = &blk_cfg[blk + 1];
			} else {
				/* Last block in the chain. The reload_en
				 * flags set above make the DMA wrap back
				 * to the first block.
				 */
				blk_cfg[blk].next_block = NULL;
			}
			/* Select next dma buffer in array */
			dma_buf_idx = dmic_mcux_next_buf_idx(dma_buf_idx);
			dma_buf = drv_data->dma_bufs[dma_buf_idx];
		}
		pdm_channel = drv_data->pdm_channels[hw_chan];
		/* Apply the DMA configuration for this hardware channel */
		ret = dma_config(pdm_channel->dma, pdm_channel->dma_chan, &dma_cfg);
		if (ret < 0) {
			LOG_ERR("Could not configure DMIC channel %d", hw_chan);
			return ret;
		}
		/* First channel is configured. Do not install callbacks for
		 * other channels.
		 */
		dma_cfg.dma_callback = NULL;
	}

	return 0;
}
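
/* Illustration of the scatter setup above for num_chan = 2 (16-bit samples,
 * dest_scatter_interval = 4 bytes): logical channel 0 writes at byte
 * offsets 0, 4, 8, ... of the buffer while logical channel 1 writes at
 * offsets 2, 6, 10, ..., which yields the interleaved layout the DMIC API
 * expects.
 */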

/* Initializes a DMIC hardware channel */
static int dmic_mcux_init_channel(const struct device *dev, uint32_t osr,
				  uint8_t chan, enum pdm_lr lr)
{
	struct mcux_dmic_drv_data *drv_data = dev->data;

	if (!drv_data->pdm_channels[chan]) {
		/* Channel disabled at devicetree level */
		return -EINVAL;
	}

	drv_data->pdm_channels[chan]->dmic_channel_cfg.osr = osr;
	/* Configure channel settings */
	DMIC_ConfigChannel(drv_data->base_address, (dmic_channel_t)chan,
			   lr == PDM_CHAN_LEFT ? kDMIC_Left : kDMIC_Right,
			   &drv_data->pdm_channels[chan]->dmic_channel_cfg);
	/* Set up the channel FIFO. We use the maximum threshold to avoid
	 * triggering DMA too frequently.
	 */
	DMIC_FifoChannel(drv_data->base_address, chan, 15, true, true);
	/* Disable interrupts. DMA will be enabled in dmic_mcux_trigger. */
	DMIC_EnableChannelInterrupt(drv_data->base_address, chan, false);
	return 0;
}

static int mcux_dmic_init(const struct device *dev)
{
	const struct mcux_dmic_cfg *config = dev->config;
	struct mcux_dmic_drv_data *drv_data = dev->data;
	int ret;

	ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		return ret;
	}
	DMIC_Init(drv_data->base_address);
	DMIC_Use2fs(drv_data->base_address, config->use2fs);
#if !(defined(FSL_FEATURE_DMIC_HAS_NO_IOCFG) && FSL_FEATURE_DMIC_HAS_NO_IOCFG)
	/* Set IO to dual mode */
	DMIC_SetIOCFG(drv_data->base_address, kDMIC_PdmDual);
#endif
	drv_data->dmic_state = DMIC_STATE_INITIALIZED;
	return 0;
}

static int dmic_mcux_configure(const struct device *dev,
			       struct dmic_cfg *config)
{
	const struct mcux_dmic_cfg *drv_config = dev->config;
	struct mcux_dmic_drv_data *drv_data = dev->data;
	struct pdm_chan_cfg *channel = &config->channel;
	struct pcm_stream_cfg *stream = &config->streams[0];
	enum pdm_lr lr_0 = 0, lr_1 = 0;
	uint8_t hw_chan_0 = 0, hw_chan_1 = 0;
	uint32_t bit_clk_rate, osr;
	int ret;

	if (drv_data->dmic_state == DMIC_STATE_ACTIVE) {
		LOG_ERR("Cannot configure device while it is active");
		return -EBUSY;
	}

	/* Only one active stream is supported */
	if (channel->req_num_streams != 1) {
		return -EINVAL;
	}

	/* The DMIC supports a fixed number of active channels. Verify the
	 * user is not requesting more.
	 */
	if (channel->req_num_chan > FSL_FEATURE_DMIC_CHANNEL_NUM) {
		LOG_ERR("DMIC supports at most %d channels",
			FSL_FEATURE_DMIC_CHANNEL_NUM);
		return -ENOTSUP;
	}

	if (stream->pcm_rate == 0 || stream->pcm_width == 0) {
		if (drv_data->dmic_state == DMIC_STATE_CONFIGURED) {
			DMIC_DeInit(drv_data->base_address);
			drv_data->dmic_state = DMIC_STATE_UNINIT;
		}
		return 0;
	}

	/* If DMIC was deinitialized, reinit here */
	if (drv_data->dmic_state == DMIC_STATE_UNINIT) {
		ret = mcux_dmic_init(dev);
		if (ret < 0) {
			LOG_ERR("Could not reinit DMIC");
			return ret;
		}
	}

	/* Currently, we only support 16 bit samples. This is because the DMIC
	 * API dictates that samples should be interleaved between channels,
	 * i.e. {C0, C1, C2, C0, C1, C2}. To achieve this we must use the
	 * "destination address increment" function of the LPC DMA IP. Since
	 * the LPC DMA IP does not support 3 byte wide transfers, we cannot
	 * effectively use destination address increments to interleave 24
	 * bit samples.
	 */
	if (stream->pcm_width != 16) {
		LOG_ERR("Only 16 bit samples are supported");
		return -ENOTSUP;
	}

	ret = clock_control_get_rate(drv_config->clock_dev,
				     drv_config->clock_name, &bit_clk_rate);
	if (ret < 0) {
		return ret;
	}

	/* Check bit clock rate versus what user requested */
	if ((config->io.min_pdm_clk_freq > bit_clk_rate) ||
	    (config->io.max_pdm_clk_freq < bit_clk_rate)) {
		return -EINVAL;
	}
	/* Calculate the required OSR based on the ratio of the PDM bit
	 * clock to the requested PCM sample rate.
	 */
	osr = dmic_mcux_get_osr(stream->pcm_rate, bit_clk_rate, drv_config->use2fs);
	/* Now, parse the channel map and set up each channel we should
	 * make active. We parse two channels at once, that way we can
	 * check to make sure that the L/R channels of each PDM controller
	 * are adjacent.
	 */
	channel->act_num_chan = 0;
	/* Save channel request data */
	drv_data->chan_map_lo = channel->req_chan_map_lo;
	drv_data->chan_map_hi = channel->req_chan_map_hi;
	for (uint8_t chan = 0; chan < channel->req_num_chan; chan += 2) {
		/* Get the channel map data for channel pair */
		dmic_parse_channel_map(channel->req_chan_map_lo,
				       channel->req_chan_map_hi,
				       chan, &hw_chan_0, &lr_0);
		if ((chan + 1) < channel->req_num_chan) {
			/* Paired channel is enabled */
			dmic_parse_channel_map(channel->req_chan_map_lo,
					       channel->req_chan_map_hi,
					       chan + 1, &hw_chan_1, &lr_1);
			/* Verify that the paired channels are the left and
			 * right halves of the same PDM controller.
			 */
			if ((lr_0 == lr_1) ||
			    (hw_chan_0 != hw_chan_1)) {
				return -EINVAL;
			}
		}
		/* Configure selected channels in DMIC */
		ret = dmic_mcux_init_channel(dev, osr,
					     dmic_mcux_hw_chan(drv_data, chan),
					     lr_0);
		if (ret < 0) {
			return ret;
		}
		channel->act_num_chan++;
		if ((chan + 1) < channel->req_num_chan) {
			/* Paired channel is enabled */
			ret = dmic_mcux_init_channel(dev, osr,
						     dmic_mcux_hw_chan(drv_data,
								       chan + 1),
						     lr_1);
			if (ret < 0) {
				return ret;
			}
			channel->act_num_chan++;
		}
	}

	channel->act_chan_map_lo = channel->req_chan_map_lo;
	channel->act_chan_map_hi = channel->req_chan_map_hi;

	drv_data->mem_slab = stream->mem_slab;
	drv_data->block_size = stream->block_size;
	drv_data->act_num_chan = channel->act_num_chan;
	drv_data->dmic_state = DMIC_STATE_CONFIGURED;

	return 0;
}
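
/* Example of a channel map this parser accepts (hypothetical, built with
 * the dmic_build_channel_map() helper from the DMIC API): four logical
 * channels covering both halves of PDM controllers 0 and 1:
 *
 *   map  = dmic_build_channel_map(0, 0, PDM_CHAN_LEFT);
 *   map |= dmic_build_channel_map(1, 0, PDM_CHAN_RIGHT);
 *   map |= dmic_build_channel_map(2, 1, PDM_CHAN_LEFT);
 *   map |= dmic_build_channel_map(3, 1, PDM_CHAN_RIGHT);
 *
 * A map that paired two same-side channels of one controller would be
 * rejected by the lr_0 == lr_1 check above.
 */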

static int dmic_mcux_start(const struct device *dev)
{
	struct mcux_dmic_drv_data *drv_data = dev->data;
	int ret;

	/* Allocate the initial set of buffers reserved for use by the hardware.
	 * We queue buffers so that when the DMA is operating on buffer "n",
	 * buffer "n+1" is already queued in the DMA hardware. When buffer "n"
	 * completes, we allocate another buffer and add it to the tail of the
	 * DMA descriptor chain. This approach requires the driver to allocate
	 * a minimum of two buffers.
	 */

	for (uint32_t i = 0; i < CONFIG_DMIC_MCUX_DMA_BUFFERS; i++) {
		/* Allocate buffers for DMA */
		ret = k_mem_slab_alloc(drv_data->mem_slab,
				       &drv_data->dma_bufs[i], K_NO_WAIT);
		if (ret < 0) {
			LOG_ERR("failed to allocate buffer");
			return -ENOBUFS;
		}
	}

	ret = dmic_mcux_setup_dma(dev);
	if (ret < 0) {
		return ret;
	}

	ret = dmic_mcux_enable_dma(drv_data, true);
	if (ret < 0) {
		return ret;
	}
	dmic_mcux_activate_channels(drv_data, true);

	return 0;
}

static int dmic_mcux_trigger(const struct device *dev,
			     enum dmic_trigger cmd)
{
	struct mcux_dmic_drv_data *drv_data = dev->data;

	switch (cmd) {
	case DMIC_TRIGGER_PAUSE:
		/* Disable active channels */
		if (drv_data->dmic_state == DMIC_STATE_ACTIVE) {
			dmic_mcux_activate_channels(drv_data, false);
		}
		drv_data->dmic_state = DMIC_STATE_PAUSED;
		break;
	case DMIC_TRIGGER_STOP:
		if (drv_data->dmic_state == DMIC_STATE_ACTIVE) {
			dmic_mcux_stop(drv_data);
		}
		drv_data->dmic_state = DMIC_STATE_CONFIGURED;
		break;
	case DMIC_TRIGGER_RELEASE:
		/* Enable active channels */
		if (drv_data->dmic_state == DMIC_STATE_PAUSED) {
			dmic_mcux_activate_channels(drv_data, true);
		}
		drv_data->dmic_state = DMIC_STATE_ACTIVE;
		break;
	case DMIC_TRIGGER_START:
		if ((drv_data->dmic_state != DMIC_STATE_CONFIGURED) &&
		    (drv_data->dmic_state != DMIC_STATE_ACTIVE)) {
			LOG_ERR("Device is not configured");
			return -EIO;
		} else if (drv_data->dmic_state != DMIC_STATE_ACTIVE) {
			if (dmic_mcux_start(dev) < 0) {
				LOG_ERR("Could not start DMIC");
				return -EIO;
			}
			drv_data->dmic_state = DMIC_STATE_ACTIVE;
		}
		break;
	case DMIC_TRIGGER_RESET:
		/* Reset DMIC to uninitialized state */
		DMIC_DeInit(drv_data->base_address);
		drv_data->dmic_state = DMIC_STATE_UNINIT;
		break;
	default:
		LOG_ERR("Invalid command: %d", cmd);
		return -EINVAL;
	}
	return 0;
}

static int dmic_mcux_read(const struct device *dev,
			  uint8_t stream,
			  void **buffer, size_t *size, int32_t timeout)
{
	struct mcux_dmic_drv_data *drv_data = dev->data;
	int ret;

	ARG_UNUSED(stream);

	if (drv_data->dmic_state == DMIC_STATE_ERROR) {
		LOG_ERR("Device reports an error, please reset and reconfigure it");
		return -EIO;
	}

	if ((drv_data->dmic_state != DMIC_STATE_CONFIGURED) &&
	    (drv_data->dmic_state != DMIC_STATE_ACTIVE) &&
	    (drv_data->dmic_state != DMIC_STATE_PAUSED)) {
		LOG_ERR("Device state is not valid for read");
		return -EIO;
	}

	ret = k_msgq_get(drv_data->rx_queue, buffer, SYS_TIMEOUT_MS(timeout));
	if (ret < 0) {
		return ret;
	}
	*size = drv_data->block_size;

	LOG_DBG("read buffer = %p", *buffer);
	return 0;
}

static const struct _dmic_ops dmic_ops = {
	.configure = dmic_mcux_configure,
	.trigger = dmic_mcux_trigger,
	.read = dmic_mcux_read,
};
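
/* A minimal usage sketch from the application side (hypothetical slab,
 * device and size values; the block/queue sizing is an assumption and must
 * satisfy CONFIG_DMIC_MCUX_DMA_BUFFERS plus queued buffers). It relies only
 * on the generic Zephyr DMIC API implemented by the ops table above:
 *
 *   #include <zephyr/audio/dmic.h>
 *
 *   #define BLOCK_SIZE  3200
 *   #define BLOCK_COUNT 8
 *   K_MEM_SLAB_DEFINE_STATIC(rx_slab, BLOCK_SIZE, BLOCK_COUNT, 4);
 *
 *   int capture(const struct device *dmic_dev)
 *   {
 *           struct pcm_stream_cfg stream = {
 *                   .pcm_rate = 16000,
 *                   .pcm_width = 16,
 *                   .block_size = BLOCK_SIZE,
 *                   .mem_slab = &rx_slab,
 *           };
 *           struct dmic_cfg cfg = {
 *                   .io = {
 *                           .min_pdm_clk_freq = 1000000,
 *                           .max_pdm_clk_freq = 3250000,
 *                   },
 *                   .streams = &stream,
 *                   .channel = {
 *                           .req_num_streams = 1,
 *                           .req_num_chan = 1,
 *                   },
 *           };
 *           void *buffer;
 *           size_t size;
 *           int ret;
 *
 *           cfg.channel.req_chan_map_lo =
 *                   dmic_build_channel_map(0, 0, PDM_CHAN_LEFT);
 *           ret = dmic_configure(dmic_dev, &cfg);
 *           if (ret == 0) {
 *                   ret = dmic_trigger(dmic_dev, DMIC_TRIGGER_START);
 *           }
 *           if (ret == 0) {
 *                   ret = dmic_read(dmic_dev, 0, &buffer, &size, 1000);
 *           }
 *           if (ret == 0) {
 *                   // The caller owns the buffer and must free it.
 *                   k_mem_slab_free(&rx_slab, buffer);
 *                   ret = dmic_trigger(dmic_dev, DMIC_TRIGGER_STOP);
 *           }
 *           return ret;
 *   }
 */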

/* Converts an integer gainshift in the range [-16, 15] into the 5 bit
 * 2's complement value expected by the GAINSHIFT register.
 */
#define PDM_DMIC_GAINSHIFT(val) ((val) & 0x1F)
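
/* Worked example of the encoding above: PDM_DMIC_GAINSHIFT(3) = 0x03,
 * while PDM_DMIC_GAINSHIFT(-2) = 0x1E, i.e. 0b11110, the 5 bit two's
 * complement representation of -2.
 */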

/* Defines structure for a given PDM channel node */
#define PDM_DMIC_CHAN_DEFINE(pdm_node)						\
	static struct mcux_dmic_pdm_chan					\
		pdm_channel_##pdm_node = {					\
		.dma = DEVICE_DT_GET(DT_DMAS_CTLR(pdm_node)),			\
		.dma_chan = DT_DMAS_CELL_BY_IDX(pdm_node, 0, channel),		\
		.dmic_channel_cfg = {						\
			.gainshft = PDM_DMIC_GAINSHIFT(DT_PROP(pdm_node,	\
							       gainshift)),	\
			.preac2coef = DT_ENUM_IDX(pdm_node, compensation_2fs),	\
			.preac4coef = DT_ENUM_IDX(pdm_node, compensation_4fs),	\
			.dc_cut_level = DT_ENUM_IDX(pdm_node, dc_cutoff),	\
			.post_dc_gain_reduce = DT_PROP(pdm_node, dc_gain),	\
			.sample_rate = kDMIC_PhyFullSpeed,			\
			.saturate16bit = 1U,					\
		},								\
	};

/* Defines structures for all enabled PDM channels */
#define PDM_DMIC_CHANNELS_DEFINE(idx)						\
	DT_INST_FOREACH_CHILD_STATUS_OKAY(idx, PDM_DMIC_CHAN_DEFINE)

/* Gets pointer for a given PDM channel node */
#define PDM_DMIC_CHAN_GET(pdm_node)						\
	COND_CODE_1(DT_NODE_HAS_STATUS_OKAY(pdm_node),				\
		    (&pdm_channel_##pdm_node), (NULL)),

/* Gets array of pointers to PDM channels */
#define PDM_DMIC_CHANNELS_GET(idx)						\
	DT_INST_FOREACH_CHILD(idx, PDM_DMIC_CHAN_GET)

#define MCUX_DMIC_DEVICE(idx)							\
	PDM_DMIC_CHANNELS_DEFINE(idx);						\
	static struct mcux_dmic_pdm_chan					\
		*pdm_channels##idx[FSL_FEATURE_DMIC_CHANNEL_NUM] = {		\
			PDM_DMIC_CHANNELS_GET(idx)				\
	};									\
	K_MSGQ_DEFINE(dmic_msgq##idx, sizeof(void *),				\
		      CONFIG_DMIC_MCUX_QUEUE_SIZE, 1);				\
	static struct mcux_dmic_drv_data mcux_dmic_data##idx = {		\
		.pdm_channels = pdm_channels##idx,				\
		.base_address = (DMIC_Type *) DT_INST_REG_ADDR(idx),		\
		.dmic_state = DMIC_STATE_UNINIT,				\
		.rx_queue = &dmic_msgq##idx,					\
		.active_buf_idx = 0U,						\
	};									\
										\
	PINCTRL_DT_INST_DEFINE(idx);						\
	static struct mcux_dmic_cfg mcux_dmic_cfg##idx = {			\
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(idx),			\
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(idx)),		\
		.clock_name = (clock_control_subsys_t)				\
				DT_INST_CLOCKS_CELL(idx, name),			\
		.use2fs = DT_INST_PROP(idx, use2fs),				\
	};									\
										\
	DEVICE_DT_INST_DEFINE(idx, mcux_dmic_init, NULL,			\
			&mcux_dmic_data##idx, &mcux_dmic_cfg##idx,		\
			POST_KERNEL, CONFIG_AUDIO_DMIC_INIT_PRIORITY,		\
			&dmic_ops);

/* Existing SoCs only have one PDM instance. */
DT_INST_FOREACH_STATUS_OKAY(MCUX_DMIC_DEVICE)