1 /*
2 * Copyright (c) 2022 Intel Corporation.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT intel_dai_dmic
8 #define LOG_DOMAIN dai_intel_dmic
9 #include <zephyr/logging/log.h>
10 LOG_MODULE_REGISTER(LOG_DOMAIN);
11
12 #include <errno.h>
13 #include <stdbool.h>
14 #include <stdint.h>
15 #include <zephyr/kernel.h>
16 #include <zephyr/spinlock.h>
17 #include <zephyr/devicetree.h>
18 #include <zephyr/pm/device.h>
19 #include <zephyr/pm/device_runtime.h>
20
21 #include <zephyr/drivers/dai.h>
22 #include <zephyr/irq.h>
23
24 #include "dmic.h"
25 #include <dmic_regs.h>
26
27 /* Base addresses (in PDM scope) of 2ch PDM controllers and coefficient RAM. */
28 static const uint32_t dmic_base[4] = {PDM0, PDM1, PDM2, PDM3};
29
30 /* global data shared between all dmic instances */
31 struct dai_dmic_global_shared dai_dmic_global;
32
/* Helper macro to read 64-bit data using two 32-bit data read.
 * NOTE(review): the two halves are not read atomically, so a carry
 * between the low and high words during readout could be observed —
 * presumably the timestamp HW latches the full value before these
 * reads; confirm against the HW spec.
 */
#define sys_read64(addr) (((uint64_t)(sys_read32(addr + 4)) << 32) | \
			  sys_read32(addr))
36
37 int dai_dmic_set_config_nhlt(struct dai_intel_dmic *dmic, const void *spec_config);
38
39 /* Exponent function for small values of x. This function calculates
40 * fairly accurately exponent for x in range -2.0 .. +2.0. The iteration
41 * uses first 11 terms of Taylor series approximation for exponent
42 * function. With the current scaling the numerator just remains under
43 * 64 bits with the 11 terms.
44 *
45 * See https://en.wikipedia.org/wiki/Exponential_function#Computation
46 *
47 * The input is Q3.29
48 * The output is Q9.23
49 */
static int32_t exp_small_fixed(int32_t x)
{
	int64_t p;
	int64_t num = Q_SHIFT_RND(x, 29, 23); /* first Taylor term: x as Q9.23 */
	int32_t y = (int32_t)num;             /* running sum of terms */
	int32_t den = 1;                      /* k! accumulator */
	int32_t inc;
	int k;

	/* Numerator is x^k, denominator is k! */
	for (k = 2; k < 12; k++) {
		p = num * x; /* Q9.23 x Q3.29 -> Q12.52 */
		num = Q_SHIFT_RND(p, 52, 23);
		den = den * k;
		inc = (int32_t)(num / den); /* term k = x^k / k! in Q9.23 */
		y += inc;
	}

	/* Add the constant term 1.0 of the series */
	return y + ONE_Q23;
}
70
/* Wide-range exponent built on exp_small_fixed(): the argument is
 * right-shifted n times until it fits the -2.0 .. +2.0 range the Taylor
 * series handles accurately, then the result is raised back by repeated
 * multiplication, using e^x = (e^(x/2^n))^(2^n).
 *
 * Input is Q5.27, output is Q12.20. Inputs below -11.5 return 0 (result
 * negligibly small) and inputs above 7.6245 saturate to INT32_MAX.
 */
static int32_t exp_fixed(int32_t x)
{
	int32_t xs;
	int32_t y;
	int32_t z;
	int i;
	int n = 0;

	if (x < Q_CONVERT_FLOAT(-11.5, 27)) {
		return 0;
	}

	if (x > Q_CONVERT_FLOAT(7.6245, 27)) {
		return INT32_MAX;
	}

	/* x is Q5.27 */
	xs = x;
	while (xs >= TWO_Q27 || xs <= MINUS_TWO_Q27) {
		xs >>= 1;
		n++;
	}

	/* exp_small_fixed() input is Q3.29, while x1 is Q5.27
	 * exp_small_fixed() output is Q9.23, while z is Q12.20
	 */
	z = Q_SHIFT_RND(exp_small_fixed(Q_SHIFT_LEFT(xs, 27, 29)), 23, 20);
	y = ONE_Q20;
	/* Multiply 2^n times to undo the n halvings of the argument */
	for (i = 0; i < (1 << n); i++) {
		y = (int32_t)Q_MULTSR_32X32((int64_t)y, z, 20, 20, 20);
	}

	return y;
}
105
db2lin_fixed(int32_t db)106 static int32_t db2lin_fixed(int32_t db)
107 {
108 int32_t arg;
109
110 if (db < Q_CONVERT_FLOAT(-100.0, 24)) {
111 return 0;
112 }
113
114 /* Q8.24 x Q5.27, result needs to be Q5.27 */
115 arg = (int32_t)Q_MULTSR_32X32((int64_t)db, LOG10_DIV20_Q27, 24, 27, 27);
116 return exp_fixed(arg);
117 }
118
dai_dmic_update_bits(const struct dai_intel_dmic * dmic,uint32_t reg,uint32_t mask,uint32_t val)119 static void dai_dmic_update_bits(const struct dai_intel_dmic *dmic,
120 uint32_t reg, uint32_t mask, uint32_t val)
121 {
122 uint32_t dest = dmic->reg_base + reg;
123
124 sys_write32((sys_read32(dest) & (~mask)) | (val & mask), dest);
125 }
126
dai_dmic_write(const struct dai_intel_dmic * dmic,uint32_t reg,uint32_t val)127 static inline void dai_dmic_write(const struct dai_intel_dmic *dmic,
128 uint32_t reg, uint32_t val)
129 {
130 sys_write32(val, dmic->reg_base + reg);
131 }
132
dai_dmic_read(const struct dai_intel_dmic * dmic,uint32_t reg)133 static inline uint32_t dai_dmic_read(const struct dai_intel_dmic *dmic,
134 uint32_t reg)
135 {
136 return sys_read32(dmic->reg_base + reg);
137 }
138
139 #if CONFIG_DAI_DMIC_HAS_OWNERSHIP
dai_dmic_claim_ownership(const struct dai_intel_dmic * dmic)140 static inline void dai_dmic_claim_ownership(const struct dai_intel_dmic *dmic)
141 {
142 /* DMIC Owner Select to DSP */
143 sys_write32(sys_read32(dmic->shim_base + DMICLCTL_OFFSET) |
144 FIELD_PREP(DMICLCTL_OSEL, 0x3), dmic->shim_base + DMICLCTL_OFFSET);
145 }
146
dai_dmic_release_ownership(const struct dai_intel_dmic * dmic)147 static inline void dai_dmic_release_ownership(const struct dai_intel_dmic *dmic)
148 {
149 /* DMIC Owner Select back to Host CPU + DSP */
150 sys_write32(sys_read32(dmic->shim_base + DMICLCTL_OFFSET) &
151 ~DMICLCTL_OSEL, dmic->shim_base + DMICLCTL_OFFSET);
152 }
153
154 #else /* CONFIG_DAI_DMIC_HAS_OWNERSHIP */
155
/* Ownership selection is not present on this platform; no-op stubs. */
static inline void dai_dmic_claim_ownership(const struct dai_intel_dmic *dmic) {}
static inline void dai_dmic_release_ownership(const struct dai_intel_dmic *dmic) {}
158
159 #endif /* CONFIG_DAI_DMIC_HAS_OWNERSHIP */
160
/* Return the base address of the DMIC link control registers: ACE 2.0+
 * platforms expose them via the HD-A ML DMIC block, older platforms via
 * the shim.
 */
static inline uint32_t dai_dmic_base(const struct dai_intel_dmic *dmic)
{
#if defined(CONFIG_SOC_INTEL_ACE20_LNL) || defined(CONFIG_SOC_INTEL_ACE30)
	return dmic->hdamldmic_base;
#else
	return dmic->shim_base;
#endif
}
169
170 #if CONFIG_DAI_DMIC_HAS_MULTIPLE_LINE_SYNC
/* Program the DMIC sync period divider derived from the IO clock.
 * On ACE 2.0+ the new divider must also be latched with SYNCPU and the
 * latch completion polled before CMDSYNC is armed.
 */
static inline void dai_dmic_set_sync_period(uint32_t period, const struct dai_intel_dmic *dmic)
{
	/* Divider value programmed into SYNCPRD */
	uint32_t val = CONFIG_DAI_DMIC_HW_IOCLK / period - 1;
	uint32_t base = dai_dmic_base(dmic);
	/* DMIC Change sync period */
#if defined(CONFIG_SOC_INTEL_ACE20_LNL) || defined(CONFIG_SOC_INTEL_ACE30)
	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | FIELD_PREP(DMICSYNC_SYNCPRD, val),
		    base + DMICSYNC_OFFSET);
	/* SYNCPU tells the HW to take the new period into use */
	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | DMICSYNC_SYNCPU,
		    base + DMICSYNC_OFFSET);

	/* HW clears SYNCPU when the period update has completed */
	if (!WAIT_FOR((sys_read32(base + DMICSYNC_OFFSET) & DMICSYNC_SYNCPU) == 0, 1000,
		      k_sleep(K_USEC(100)))) {
		LOG_ERR("poll timeout");
	}

	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | DMICSYNC_CMDSYNC,
		    base + DMICSYNC_OFFSET);
#else /* All other CAVS and ACE platforms */
	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | FIELD_PREP(DMICSYNC_SYNCPRD, val),
		    base + DMICSYNC_OFFSET);
	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | DMICSYNC_CMDSYNC,
		    base + DMICSYNC_OFFSET);
#endif
}
196
dai_dmic_clear_sync_period(const struct dai_intel_dmic * dmic)197 static inline void dai_dmic_clear_sync_period(const struct dai_intel_dmic *dmic)
198 {
199 uint32_t base = dai_dmic_base(dmic);
200 /* DMIC Clean sync period */
201 sys_write32(sys_read32(base + DMICSYNC_OFFSET) & ~DMICSYNC_SYNCPRD,
202 base + DMICSYNC_OFFSET);
203 sys_write32(sys_read32(base + DMICSYNC_OFFSET) & ~DMICSYNC_CMDSYNC,
204 base + DMICSYNC_OFFSET);
205 }
206
207 /* Preparing for command synchronization on multiple link segments */
dai_dmic_sync_prepare(const struct dai_intel_dmic * dmic)208 static inline void dai_dmic_sync_prepare(const struct dai_intel_dmic *dmic)
209 {
210 uint32_t base = dai_dmic_base(dmic);
211
212 sys_write32(sys_read32(base + DMICSYNC_OFFSET) | DMICSYNC_CMDSYNC,
213 base + DMICSYNC_OFFSET);
214 }
215
/* Triggering synchronization of command execution */
static void dmic_sync_trigger(const struct dai_intel_dmic *dmic)
{
	uint32_t base = dai_dmic_base(dmic);

	/* CMDSYNC must have been armed by dai_dmic_sync_prepare() */
	__ASSERT_NO_MSG((sys_read32(base + DMICSYNC_OFFSET) & DMICSYNC_CMDSYNC) != 0);

	/* SYNCGO kicks off the synchronized command execution */
	sys_write32(sys_read32(base + DMICSYNC_OFFSET) |
		    DMICSYNC_SYNCGO, base + DMICSYNC_OFFSET);

	/* waiting for CMDSYNC bit clearing */
	if (!WAIT_FOR((sys_read32(base + DMICSYNC_OFFSET) & DMICSYNC_CMDSYNC) == 0,
		      1000, k_sleep(K_USEC(100)))) {
		LOG_ERR("poll timeout");
	}
}
232
233 #else /* CONFIG_DAI_DMIC_HAS_MULTIPLE_LINE_SYNC */
234
/* Multiple-line command sync is not supported on this platform; stubs. */
static inline void dai_dmic_set_sync_period(uint32_t period, const struct dai_intel_dmic *dmic) {}
static inline void dai_dmic_clear_sync_period(const struct dai_intel_dmic *dmic) {}
static inline void dai_dmic_sync_prepare(const struct dai_intel_dmic *dmic) {}
static void dmic_sync_trigger(const struct dai_intel_dmic *dmic) {}
239
240 #endif /* CONFIG_DAI_DMIC_HAS_MULTIPLE_LINE_SYNC */
241
dai_dmic_start_fifo_packers(struct dai_intel_dmic * dmic,int fifo_index)242 static void dai_dmic_start_fifo_packers(struct dai_intel_dmic *dmic, int fifo_index)
243 {
244
245 /* Start FIFO packers and clear FIFO initialize bits */
246 dai_dmic_update_bits(dmic, fifo_index * PDM_CHANNEL_REGS_SIZE + OUTCONTROL,
247 OUTCONTROL_SIP | OUTCONTROL_FINIT,
248 OUTCONTROL_SIP);
249 }
250
dai_dmic_stop_fifo_packers(struct dai_intel_dmic * dmic,int fifo_index)251 static void dai_dmic_stop_fifo_packers(struct dai_intel_dmic *dmic,
252 int fifo_index)
253 {
254 /* Stop FIFO packers and set FIFO initialize bits */
255 dai_dmic_update_bits(dmic, fifo_index * PDM_CHANNEL_REGS_SIZE + OUTCONTROL,
256 OUTCONTROL_SIP | OUTCONTROL_FINIT,
257 OUTCONTROL_FINIT);
258 }
259
/* Disable dynamic clock gating so DMIC registers stay accessible.
 * ACE 2.0+ uses the DCGD bit in the vendor-specific control register,
 * older platforms the one in the link control register.
 */
static inline void dai_dmic_dis_clk_gating(const struct dai_intel_dmic *dmic)
{
	/* Disable DMIC clock gating */
#if (CONFIG_SOC_INTEL_ACE20_LNL || CONFIG_SOC_INTEL_ACE30)
	sys_write32((sys_read32(dmic->vshim_base + DMICLVSCTL_OFFSET) | DMICLVSCTL_DCGD),
		    dmic->vshim_base + DMICLVSCTL_OFFSET);
#else
	sys_write32((sys_read32(dmic->shim_base + DMICLCTL_OFFSET) | DMICLCTL_DCGD),
		    dmic->shim_base + DMICLCTL_OFFSET);
#endif
}
271
/* Re-enable dynamic clock gating (counterpart of
 * dai_dmic_dis_clk_gating()) by clearing the DCGD bit.
 */
static inline void dai_dmic_en_clk_gating(const struct dai_intel_dmic *dmic)
{
	/* Enable DMIC clock gating */
#if (CONFIG_SOC_INTEL_ACE20_LNL || CONFIG_SOC_INTEL_ACE30)
	sys_write32((sys_read32(dmic->vshim_base + DMICLVSCTL_OFFSET) & ~DMICLVSCTL_DCGD),
		    dmic->vshim_base + DMICLVSCTL_OFFSET);
#else /* All other CAVS and ACE platforms */
	sys_write32((sys_read32(dmic->shim_base + DMICLCTL_OFFSET) & ~DMICLCTL_DCGD),
		    dmic->shim_base + DMICLCTL_OFFSET);
#endif

}
284
/* Program the PCM stream channel map register for FIFO @index on ACE
 * 2.0+ platforms; the 16-bit map comes from the DAI link configuration.
 * No-op on other platforms.
 */
static inline void dai_dmic_program_channel_map(const struct dai_intel_dmic *dmic,
						const struct dai_config *cfg,
						uint32_t index)
{
#if defined(CONFIG_SOC_INTEL_ACE20_LNL) || defined(CONFIG_SOC_INTEL_ACE30)
	uint16_t pcmsycm = cfg->link_config;
	/* Each FIFO has its own 4-byte-spaced map register */
	uint32_t reg_add = dmic->shim_base + DMICXPCMSyCM_OFFSET + 0x0004*index;

	sys_write16(pcmsycm, reg_add);
#else
	ARG_UNUSED(dmic);
	ARG_UNUSED(cfg);
	ARG_UNUSED(index);
#endif /* defined(CONFIG_SOC_INTEL_ACE20_LNL) || defined(CONFIG_SOC_INTEL_ACE30) */
}
300
dai_dmic_en_power(const struct dai_intel_dmic * dmic)301 static inline void dai_dmic_en_power(const struct dai_intel_dmic *dmic)
302 {
303 uint32_t base = dai_dmic_base(dmic);
304 /* Enable DMIC power */
305 sys_write32((sys_read32(base + DMICLCTL_OFFSET) | DMICLCTL_SPA),
306 base + DMICLCTL_OFFSET);
307
308 #if defined(CONFIG_SOC_INTEL_ACE20_LNL) || defined(CONFIG_SOC_INTEL_ACE30)
309 while (!(sys_read32(base + DMICLCTL_OFFSET) & DMICLCTL_CPA)) {
310 k_sleep(K_USEC(100));
311 }
312 #endif
313 }
314
dai_dmic_dis_power(const struct dai_intel_dmic * dmic)315 static inline void dai_dmic_dis_power(const struct dai_intel_dmic *dmic)
316 {
317 uint32_t base = dai_dmic_base(dmic);
318 /* Disable DMIC power */
319 sys_write32((sys_read32(base + DMICLCTL_OFFSET) & (~DMICLCTL_SPA)),
320 base + DMICLCTL_OFFSET);
321 }
322
/* Bring up the DMIC hardware for the first user: power on, keep clocks
 * ungated, program the sync period and claim ownership for the DSP.
 * Reference counting is done by the caller (dai_dmic_probe_wrapper()).
 * The order of the steps below matters.
 *
 * Returns 0.
 */
static int dai_dmic_probe(struct dai_intel_dmic *dmic)
{
	LOG_INF("dmic_probe()");

	/* Set state, note there is no playback direction support */
	dmic->state = DAI_STATE_NOT_READY;

	/* Enable DMIC power */
	dai_dmic_en_power(dmic);

	/* Disable dynamic clock gating for dmic before touching any reg */
	dai_dmic_dis_clk_gating(dmic);

	/* DMIC Change sync period */
	dai_dmic_set_sync_period(CONFIG_DAI_DMIC_PLATFORM_SYNC_PERIOD, dmic);

	/* DMIC Owner Select to DSP */
	dai_dmic_claim_ownership(dmic);

	return 0;
}
344
/* Tear down the DMIC hardware when the last user goes away. If any
 * FIFO is still active or merely paused the teardown is skipped so the
 * other DAI keeps running.
 *
 * Returns 0.
 */
static int dai_dmic_remove(struct dai_intel_dmic *dmic)
{
	uint32_t active_fifos_mask = dai_dmic_global.active_fifos_mask;
	uint32_t pause_mask = dai_dmic_global.pause_mask;

	LOG_INF("dmic_remove(), dmic_active_fifos_mask = 0x%x, dmic_pause_mask = 0x%x",
		active_fifos_mask, pause_mask);

	/* The next end tasks must be passed if another DAI FIFO still runs.
	 * Note: dai_put() function that calls remove() applies the spinlock
	 * so it is not needed here to protect access to mask bits.
	 */
	if (active_fifos_mask || pause_mask) {
		return 0;
	}

	/* Disable DMIC clock and power */
	dai_dmic_en_clk_gating(dmic);
	dai_dmic_dis_power(dmic);

	/* DMIC Clean sync period */
	dai_dmic_clear_sync_period(dmic);

	/* DMIC Owner Select back to Host CPU + DSP */
	dai_dmic_release_ownership(dmic);

	return 0;
}
373
/* Timestamping setup: the DMIC wall clock runs at the IO clock rate.
 * Returns 0.
 */
static int dai_dmic_timestamp_config(const struct device *dev, struct dai_ts_cfg *cfg)
{
	cfg->walclk_rate = CONFIG_DAI_DMIC_HW_IOCLK;

	return 0;
}
380
/* Enable DMIC timestamping: select the GPDMA channel to snapshot on and
 * request an on-demand timestamp. Returns 0.
 */
static int dai_timestamp_dmic_start(const struct device *dev, struct dai_ts_cfg *cfg)
{
	uint32_t addr = TS_DMIC_LOCAL_TSCTRL;
	uint32_t cdmas;

	/* Set DMIC timestamp registers */

	/* First point CDMAS to GPDMA channel that is used by DMIC
	 * also clear NTK to be sure there is no old timestamp.
	 */
	cdmas = FIELD_PREP(TS_LOCAL_TSCTRL_CDMAS, cfg->dma_chan_index +
			   cfg->dma_chan_count * cfg->dma_id);
	sys_write32(TS_LOCAL_TSCTRL_NTK | cdmas, addr);

	/* Request on demand timestamp */
	sys_write32(TS_LOCAL_TSCTRL_ODTS | cdmas, addr);

	return 0;
}
400
/* Disable DMIC timestamping: clear the NTK flag and zero CDMAS.
 * Returns 0.
 */
static int dai_timestamp_dmic_stop(const struct device *dev, struct dai_ts_cfg *cfg)
{
	/* Clear NTK and write zero to CDMAS */
	sys_write32(TS_LOCAL_TSCTRL_NTK, TS_DMIC_LOCAL_TSCTRL);
	return 0;
}
407
dai_timestamp_dmic_get(const struct device * dev,struct dai_ts_cfg * cfg,struct dai_ts_data * tsd)408 static int dai_timestamp_dmic_get(const struct device *dev, struct dai_ts_cfg *cfg,
409 struct dai_ts_data *tsd)
410 {
411 /* Read DMIC timestamp registers */
412 uint32_t tsctrl = TS_DMIC_LOCAL_TSCTRL;
413 uint32_t ntk;
414
415 /* Read SSP timestamp registers */
416 ntk = sys_read32(tsctrl) & TS_LOCAL_TSCTRL_NTK;
417 if (!ntk) {
418 goto out;
419 }
420
421 /* NTK was set, get wall clock */
422 tsd->walclk = sys_read64(TS_DMIC_LOCAL_WALCLK);
423
424 /* Sample */
425 tsd->sample = sys_read64(TS_DMIC_LOCAL_SAMPLE);
426
427 /* Clear NTK to enable successive timestamps */
428 sys_write32(TS_LOCAL_TSCTRL_NTK, tsctrl);
429
430 out:
431 tsd->walclk_rate = cfg->walclk_rate;
432 if (!ntk) {
433 return -ENODATA;
434 }
435
436 return 0;
437 }
438
/* this ramps volume changes over time */
static void dai_dmic_gain_ramp(struct dai_intel_dmic *dmic)
{
	k_spinlock_key_t key;
	int32_t gval;
	uint32_t val;
	int i;

	/* Currently there's no DMIC HW internal mutings and wait times
	 * applied into this start sequence. It can be implemented here if
	 * start of audio capture would contain clicks and/or noise and it
	 * is not suppressed by gain ramp somewhere in the capture pipe.
	 */
	LOG_DBG("DMIC gain ramp");

	/*
	 * At run-time dmic->gain is only changed in this function, and this
	 * function runs in the pipeline task context, so it cannot run
	 * concurrently on multiple cores, since there's always only one
	 * task associated with each DAI, so we don't need to hold the lock to
	 * read the value here.
	 */
	/* Ramp is complete once gain saturates at the HW maximum */
	if (dmic->gain == DMIC_HW_FIR_GAIN_MAX << 11) {
		return;
	}

	key = k_spin_lock(&dmic->lock);

	/* Increment gain with logarithmic step.
	 * Gain is Q2.30 and gain modifier is Q12.20.
	 */
	dmic->startcount++;
	dmic->gain = q_multsr_sat_32x32(dmic->gain, dmic->gain_coef, Q_SHIFT_GAIN_X_GAIN_COEF);

	/* Gain is stored as Q2.30, while HW register is Q1.19 so shift
	 * the value right by 11.
	 */
	gval = dmic->gain >> 11;

	/* Note that DMIC gain value zero has a special purpose. Value zero
	 * sets gain bypass mode in HW. Zero value will be applied after ramp
	 * is complete. It is because exact 1.0 gain is not possible with Q1.19.
	 */
	if (gval > DMIC_HW_FIR_GAIN_MAX) {
		gval = 0;
		dmic->gain = DMIC_HW_FIR_GAIN_MAX << 11;
	}

	/* Write gain to registers */
	for (i = 0; i < CONFIG_DAI_DMIC_HW_CONTROLLERS; i++) {
		if (!dmic->enable[i]) {
			continue;
		}

		/* Unmute the CIC stage once the ramp has run for
		 * DMIC_UNMUTE_CIC update periods.
		 */
		if (dmic->startcount == DMIC_UNMUTE_CIC) {
			dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
					     CIC_CONTROL_MIC_MUTE, 0);
		}

		/* Unmute the FIR stage at its own point of the ramp */
		if (dmic->startcount == DMIC_UNMUTE_FIR) {
			dai_dmic_update_bits(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
					     dmic->dai_config_params.dai_index + FIR_CONTROL,
					     FIR_CONTROL_MUTE, 0);
		}

		if (gval != 0) {
			/* Ramp in progress: same ramped gain on both channels */
			val = FIELD_PREP(OUT_GAIN, gval);
			dai_dmic_write(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				       dmic->dai_config_params.dai_index + OUT_GAIN_LEFT, val);
			dai_dmic_write(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				       dmic->dai_config_params.dai_index + OUT_GAIN_RIGHT, val);
		} else {
			/* Ramp done: apply the configured per-channel gains */
			dai_dmic_write(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				       dmic->dai_config_params.dai_index + OUT_GAIN_LEFT,
				       dmic->gain_left);
			dai_dmic_write(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				       dmic->dai_config_params.dai_index + OUT_GAIN_RIGHT,
				       dmic->gain_right);
		}
	}

	k_spin_unlock(&dmic->lock, key);
}
522
/* Start capture on this DAI's FIFO: initialize the unmute gain ramp,
 * start the FIFO packer, enable the CIC/PDM paths for the microphones
 * selected in dmic->enable[], start this FIFO's FIR, and finally
 * trigger a synchronized start. The exact register ordering differs
 * between ACE and older platforms (see the #ifdefs).
 */
static void dai_dmic_start(struct dai_intel_dmic *dmic)
{
	k_spinlock_key_t key;
	int i;
	int mic_a;
	int mic_b;
	int start_fir;

	/* enable port */
	key = k_spin_lock(&dmic->lock);

#ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
	/* On ACE the CIC soft reset is released up front */
	for (i = 0; i < CONFIG_DAI_DMIC_HW_CONTROLLERS; i++) {
		dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL, CIC_CONTROL_SOFT_RESET, 0);
	}
#endif

	dmic->startcount = 0;

	/* Compute unmute ramp gain update coefficient. */
	dmic->gain_coef = db2lin_fixed(LOGRAMP_CONST_TERM / dmic->unmute_time_ms);

	/* Initial gain value, convert Q12.20 to Q2.30 */
	dmic->gain = Q_SHIFT_LEFT(db2lin_fixed(LOGRAMP_START_DB), 20, 30);

	dai_dmic_sync_prepare(dmic);

	dai_dmic_start_fifo_packers(dmic, dmic->dai_config_params.dai_index);

	for (i = 0; i < CONFIG_DAI_DMIC_HW_CONTROLLERS; i++) {
		/* enable[i] bit 0 selects mic A, bit 1 selects mic B */
		mic_a = dmic->enable[i] & 1;
		mic_b = (dmic->enable[i] & 2) >> 1;
		start_fir = dmic->enable[i] > 0;

		/* If both microphones are needed start them simultaneously
		 * to start them in sync. The reset may be cleared for another
		 * FIFO already. If only one mic, start them independently.
		 * This makes sure we do not clear start/en for another DAI.
		 */
		if (mic_a && mic_b) {
			dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
					     CIC_CONTROL_CIC_START_A |
					     CIC_CONTROL_CIC_START_B,
					     FIELD_PREP(CIC_CONTROL_CIC_START_A, 1) |
					     FIELD_PREP(CIC_CONTROL_CIC_START_B, 1));
			dai_dmic_update_bits(dmic, dmic_base[i] + MIC_CONTROL,
					     MIC_CONTROL_PDM_EN_A |
					     MIC_CONTROL_PDM_EN_B,
					     FIELD_PREP(MIC_CONTROL_PDM_EN_A, 1) |
					     FIELD_PREP(MIC_CONTROL_PDM_EN_B, 1));
		} else if (mic_a) {
			dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
					     CIC_CONTROL_CIC_START_A,
					     FIELD_PREP(CIC_CONTROL_CIC_START_A, 1));
			dai_dmic_update_bits(dmic, dmic_base[i] + MIC_CONTROL,
					     MIC_CONTROL_PDM_EN_A,
					     FIELD_PREP(MIC_CONTROL_PDM_EN_A, 1));
		} else if (mic_b) {
			dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
					     CIC_CONTROL_CIC_START_B,
					     FIELD_PREP(CIC_CONTROL_CIC_START_B, 1));
			dai_dmic_update_bits(dmic, dmic_base[i] + MIC_CONTROL,
					     MIC_CONTROL_PDM_EN_B,
					     FIELD_PREP(MIC_CONTROL_PDM_EN_B, 1));
		}

		/* Start this FIFO's FIR if any mic on this PDM is in use */
		dai_dmic_update_bits(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				     dmic->dai_config_params.dai_index + FIR_CONTROL,
				     FIR_CONTROL_START,
				     FIELD_PREP(FIR_CONTROL_START, start_fir));
	}

#ifndef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
	/* Clear soft reset for all/used PDM controllers. This should
	 * start capture in sync.
	 */
	for (i = 0; i < CONFIG_DAI_DMIC_HW_CONTROLLERS; i++) {
		dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
				     CIC_CONTROL_SOFT_RESET, 0);

		LOG_INF("dmic_start(), cic 0x%08x",
			dai_dmic_read(dmic, dmic_base[i] + CIC_CONTROL));
	}
#endif

	/* Set bit dai->index */
	dai_dmic_global.active_fifos_mask |= BIT(dmic->dai_config_params.dai_index);
	dai_dmic_global.pause_mask &= ~BIT(dmic->dai_config_params.dai_index);

	dmic->state = DAI_STATE_RUNNING;
	k_spin_unlock(&dmic->lock, key);

	/* Synchronized GO is done outside the lock: it polls with sleeps */
	dmic_sync_trigger(dmic);

	LOG_INF("dmic_start(), dmic_active_fifos_mask = 0x%x",
		dai_dmic_global.active_fifos_mask);
}
620
/* Stop capture on this DAI's FIFO. @stop_is_pause records the stop as a
 * pause in the global pause mask so that dai_dmic_remove() keeps the
 * hardware powered. The CIC is only reset/muted when no other FIFO
 * remains active; this FIFO's FIR is always muted.
 */
static void dai_dmic_stop(struct dai_intel_dmic *dmic, bool stop_is_pause)
{
	k_spinlock_key_t key;
	int i;

	LOG_DBG("dmic_stop()");
	key = k_spin_lock(&dmic->lock);

	dai_dmic_stop_fifo_packers(dmic, dmic->dai_config_params.dai_index);

	/* Set soft reset and mute on for all PDM controllers. */
	LOG_INF("dmic_stop(), dmic_active_fifos_mask = 0x%x",
		dai_dmic_global.active_fifos_mask);

	/* Clear bit dmic->dai_config_params.dai_index for active FIFO.
	 * If stop for pause, set pause mask bit.
	 * If stop is not for pausing, it is safe to clear the pause bit.
	 */
	dai_dmic_global.active_fifos_mask &= ~BIT(dmic->dai_config_params.dai_index);
	if (stop_is_pause) {
		dai_dmic_global.pause_mask |= BIT(dmic->dai_config_params.dai_index);
	} else {
		dai_dmic_global.pause_mask &= ~BIT(dmic->dai_config_params.dai_index);
	}

	for (i = 0; i < CONFIG_DAI_DMIC_HW_CONTROLLERS; i++) {
		/* Don't stop CIC yet if one FIFO remains active */
		if (dai_dmic_global.active_fifos_mask == 0) {
			dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
					     CIC_CONTROL_SOFT_RESET |
					     CIC_CONTROL_MIC_MUTE,
					     CIC_CONTROL_SOFT_RESET |
					     CIC_CONTROL_MIC_MUTE);
		}
		/* Mute this FIFO's FIR path unconditionally */
		dai_dmic_update_bits(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				     dmic->dai_config_params.dai_index + FIR_CONTROL,
				     FIR_CONTROL_MUTE,
				     FIR_CONTROL_MUTE);
	}

	k_spin_unlock(&dmic->lock, key);
}
663
dai_dmic_get_properties(const struct device * dev,enum dai_dir dir,int stream_id)664 const struct dai_properties *dai_dmic_get_properties(const struct device *dev,
665 enum dai_dir dir,
666 int stream_id)
667 {
668 const struct dai_intel_dmic *dmic = (const struct dai_intel_dmic *)dev->data;
669 struct dai_properties *prop = (struct dai_properties *)dev->config;
670
671 prop->fifo_address = dmic->fifo.offset;
672 prop->fifo_depth = dmic->fifo.depth;
673 prop->dma_hs_id = dmic->fifo.handshake;
674 prop->reg_init_delay = 0;
675
676 return prop;
677 }
678
dai_dmic_trigger(const struct device * dev,enum dai_dir dir,enum dai_trigger_cmd cmd)679 static int dai_dmic_trigger(const struct device *dev, enum dai_dir dir,
680 enum dai_trigger_cmd cmd)
681 {
682 struct dai_intel_dmic *dmic = (struct dai_intel_dmic *)dev->data;
683
684 LOG_DBG("dmic_trigger()");
685
686 if (dir != DAI_DIR_RX) {
687 LOG_ERR("dmic_trigger(): direction != DAI_DIR_RX");
688 return -EINVAL;
689 }
690
691 switch (cmd) {
692 case DAI_TRIGGER_START:
693 if (dmic->state == DAI_STATE_PAUSED ||
694 dmic->state == DAI_STATE_PRE_RUNNING) {
695 dai_dmic_start(dmic);
696 dmic->state = DAI_STATE_RUNNING;
697 } else {
698 LOG_ERR("dmic_trigger(): state is not prepare or paused, dmic->state = %u",
699 dmic->state);
700 }
701 break;
702 case DAI_TRIGGER_STOP:
703 dai_dmic_stop(dmic, false);
704 dmic->state = DAI_STATE_PRE_RUNNING;
705 break;
706 case DAI_TRIGGER_PAUSE:
707 dai_dmic_stop(dmic, true);
708 dmic->state = DAI_STATE_PAUSED;
709 break;
710 case DAI_TRIGGER_COPY:
711 dai_dmic_gain_ramp(dmic);
712 break;
713 default:
714 break;
715 }
716
717 return 0;
718 }
719
dai_dmic_get_config(const struct device * dev,struct dai_config * cfg,enum dai_dir dir)720 static int dai_dmic_get_config(const struct device *dev, struct dai_config *cfg, enum dai_dir dir)
721 {
722 struct dai_intel_dmic *dmic = (struct dai_intel_dmic *)dev->data;
723
724 if (dir != DAI_DIR_RX) {
725 return -EINVAL;
726 }
727
728 if (!cfg) {
729 return -EINVAL;
730 }
731
732 *cfg = dmic->dai_config_params;
733
734 return 0;
735 }
736
/* Apply a new DAI configuration. @bespoke_cfg carries the vendor blob
 * (NHLT on current builds) that programs the PDM/FIR registers; @cfg
 * supplies the channel map on ACE 2.0+. On success the DAI moves to the
 * PRE_RUNNING state. Returns 0 or a negative error code.
 */
static int dai_dmic_set_config(const struct device *dev,
			       const struct dai_config *cfg, const void *bespoke_cfg)

{
	struct dai_intel_dmic *dmic = (struct dai_intel_dmic *)dev->data;
	int ret = 0;
	int di = dmic->dai_config_params.dai_index;
	k_spinlock_key_t key;

	LOG_INF("dmic_set_config()");

	if (di >= CONFIG_DAI_DMIC_HW_FIFOS) {
		LOG_ERR("dmic_set_config(): DAI index exceeds number of FIFOs");
		return -EINVAL;
	}

	if (!bespoke_cfg) {
		LOG_ERR("dmic_set_config(): NULL config");
		return -EINVAL;
	}

	dai_dmic_program_channel_map(dmic, cfg, di);

	key = k_spin_lock(&dmic->lock);

#if CONFIG_DAI_INTEL_DMIC_TPLG_PARAMS
#error DMIC TPLG is not yet implemented

#elif CONFIG_DAI_INTEL_DMIC_NHLT
	ret = dai_dmic_set_config_nhlt(dmic, bespoke_cfg);

	/* There's no unmute ramp duration in blob, so the default rate dependent is used. */
	dmic->unmute_time_ms = dmic_get_unmute_ramp_from_samplerate(dmic->dai_config_params.rate);
#else
#error No DMIC config selected
#endif

	if (ret < 0) {
		LOG_ERR("dmic_set_config(): Failed to set the requested configuration.");
		goto out;
	}

	dmic->state = DAI_STATE_PRE_RUNNING;

out:
	k_spin_unlock(&dmic->lock, key);
	return ret;
}
785
dai_dmic_probe_wrapper(const struct device * dev)786 static int dai_dmic_probe_wrapper(const struct device *dev)
787 {
788 struct dai_intel_dmic *dmic = (struct dai_intel_dmic *)dev->data;
789 k_spinlock_key_t key;
790 int ret = 0;
791
792 key = k_spin_lock(&dmic->lock);
793
794 if (dmic->sref == 0) {
795 ret = dai_dmic_probe(dmic);
796 }
797
798 if (!ret) {
799 dmic->sref++;
800 }
801
802 k_spin_unlock(&dmic->lock, key);
803
804 return ret;
805 }
806
dai_dmic_remove_wrapper(const struct device * dev)807 static int dai_dmic_remove_wrapper(const struct device *dev)
808 {
809 struct dai_intel_dmic *dmic = (struct dai_intel_dmic *)dev->data;
810 k_spinlock_key_t key;
811 int ret = 0;
812
813 key = k_spin_lock(&dmic->lock);
814
815 if (--dmic->sref == 0) {
816 ret = dai_dmic_remove(dmic);
817 }
818
819 k_spin_unlock(&dmic->lock, key);
820
821 return ret;
822 }
823
dmic_pm_action(const struct device * dev,enum pm_device_action action)824 static int dmic_pm_action(const struct device *dev, enum pm_device_action action)
825 {
826 switch (action) {
827 case PM_DEVICE_ACTION_SUSPEND:
828 dai_dmic_remove_wrapper(dev);
829 break;
830 case PM_DEVICE_ACTION_RESUME:
831 dai_dmic_probe_wrapper(dev);
832 break;
833 case PM_DEVICE_ACTION_TURN_OFF:
834 case PM_DEVICE_ACTION_TURN_ON:
835 /* All device pm is handled during resume and suspend */
836 break;
837 default:
838 return -ENOTSUP;
839 }
840
841 return 0;
842 }
843
/* DAI driver API table. probe/remove map to PM runtime get/put so the
 * hardware is only powered while a user holds a reference (the actual
 * work happens in dmic_pm_action()).
 */
DEVICE_API(dai, dai_dmic_ops) = {
	.probe			= pm_device_runtime_get,
	.remove			= pm_device_runtime_put,
	.config_set		= dai_dmic_set_config,
	.config_get		= dai_dmic_get_config,
	.get_properties		= dai_dmic_get_properties,
	.trigger		= dai_dmic_trigger,
	.ts_config		= dai_dmic_timestamp_config,
	.ts_start		= dai_timestamp_dmic_start,
	.ts_stop		= dai_timestamp_dmic_stop,
	.ts_get			= dai_timestamp_dmic_get
};
856
dai_dmic_initialize_device(const struct device * dev)857 static int dai_dmic_initialize_device(const struct device *dev)
858 {
859 return pm_device_driver_init(dev, dmic_pm_action);
860 };
861
862
/* Per-instance instantiation: property storage returned by
 * get_properties(), driver data populated from devicetree (register
 * bases, optional HDA-ML/vendor-shim bases, IRQ, FIFO offset and DMA
 * handshake), the PM device, and the device definition itself.
 */
#define DAI_INTEL_DMIC_DEVICE_INIT(n)					\
	static struct dai_properties dai_intel_dmic_properties_##n;	\
									\
	static struct dai_intel_dmic dai_intel_dmic_data_##n =		\
	{	.dai_config_params =					\
		{							\
			.type = DAI_INTEL_DMIC,				\
			.dai_index = n					\
		},							\
		.reg_base = DT_INST_REG_ADDR_BY_IDX(n, 0),		\
		.shim_base = DT_INST_PROP(n, shim),			\
		IF_ENABLED(DT_NODE_EXISTS(DT_NODELABEL(hdamlddmic)),	\
			   (.hdamldmic_base = DT_REG_ADDR(DT_NODELABEL(hdamlddmic)),))	\
		IF_ENABLED(DT_NODE_EXISTS(DT_NODELABEL(dmicvss)),	\
			   (.vshim_base = DT_REG_ADDR(DT_NODELABEL(dmicvss)),))	\
		.irq = DT_INST_IRQN(n),					\
		.fifo =							\
		{							\
			.offset = DT_INST_REG_ADDR_BY_IDX(n, 0)		\
				+ DT_INST_PROP(n, fifo),		\
			.handshake = DMA_HANDSHAKE_DMIC_CH##n		\
		},							\
	};								\
									\
	PM_DEVICE_DT_INST_DEFINE(n, dmic_pm_action);			\
									\
	DEVICE_DT_INST_DEFINE(n,					\
			      dai_dmic_initialize_device,		\
			      PM_DEVICE_DT_INST_GET(n),			\
			      &dai_intel_dmic_data_##n,			\
			      &dai_intel_dmic_properties_##n,		\
			      POST_KERNEL,				\
			      CONFIG_DAI_INIT_PRIORITY,			\
			      &dai_dmic_ops);

/* Instantiate one driver per enabled intel,dai-dmic devicetree node */
DT_INST_FOREACH_STATUS_OKAY(DAI_INTEL_DMIC_DEVICE_INIT)
899