1 /*
2  * Copyright (c) 2022 Intel Corporation.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DT_DRV_COMPAT intel_dai_dmic
8 #define LOG_DOMAIN dai_intel_dmic
9 #include <zephyr/logging/log.h>
10 LOG_MODULE_REGISTER(LOG_DOMAIN);
11 
12 #include <errno.h>
13 #include <stdbool.h>
14 #include <stdint.h>
15 #include <zephyr/kernel.h>
16 #include <zephyr/spinlock.h>
17 #include <zephyr/devicetree.h>
18 #include <zephyr/pm/device.h>
19 #include <zephyr/pm/device_runtime.h>
20 
21 #include <zephyr/drivers/dai.h>
22 #include <zephyr/irq.h>
23 
24 #include "dmic.h"
25 #include <dmic_regs.h>
26 
/* Base addresses (in PDM scope) of 2ch PDM controllers and coefficient RAM. */
static const uint32_t dmic_base[4] = {PDM0, PDM1, PDM2, PDM3};

/* global data shared between all dmic instances: the active/paused FIFO
 * masks used to coordinate start/stop across DAIs sharing the hardware
 */
struct dai_dmic_global_shared dai_dmic_global;
32 
/* Helper macro to read 64-bit data using two 32-bit data read.
 * The argument is parenthesized so that expression arguments
 * (e.g. base + offset) are not broken by operator precedence.
 * Note: the two reads are not atomic with respect to each other.
 */
#define sys_read64(addr)    (((uint64_t)(sys_read32((addr) + 4)) << 32) | \
			     sys_read32(addr))
36 
37 int dai_dmic_set_config_nhlt(struct dai_intel_dmic *dmic, const void *spec_config);
38 
39 /* Exponent function for small values of x. This function calculates
40  * fairly accurately exponent for x in range -2.0 .. +2.0. The iteration
41  * uses first 11 terms of Taylor series approximation for exponent
42  * function. With the current scaling the numerator just remains under
43  * 64 bits with the 11 terms.
44  *
45  * See https://en.wikipedia.org/wiki/Exponential_function#Computation
46  *
47  * The input is Q3.29
48  * The output is Q9.23
49  */
exp_small_fixed(int32_t x)50 static int32_t exp_small_fixed(int32_t x)
51 {
52 	int64_t p;
53 	int64_t num = Q_SHIFT_RND(x, 29, 23);
54 	int32_t y = (int32_t)num;
55 	int32_t den = 1;
56 	int32_t inc;
57 	int k;
58 
59 	/* Numerator is x^k, denominator is k! */
60 	for (k = 2; k < 12; k++) {
61 		p = num * x; /* Q9.23 x Q3.29 -> Q12.52 */
62 		num = Q_SHIFT_RND(p, 52, 23);
63 		den = den * k;
64 		inc = (int32_t)(num / den);
65 		y += inc;
66 	}
67 
68 	return y + ONE_Q23;
69 }
70 
exp_fixed(int32_t x)71 static int32_t exp_fixed(int32_t x)
72 {
73 	int32_t xs;
74 	int32_t y;
75 	int32_t z;
76 	int i;
77 	int n = 0;
78 
79 	if (x < Q_CONVERT_FLOAT(-11.5, 27))
80 		return 0;
81 
82 	if (x > Q_CONVERT_FLOAT(7.6245, 27))
83 		return INT32_MAX;
84 
85 	/* x is Q5.27 */
86 	xs = x;
87 	while (xs >= TWO_Q27 || xs <= MINUS_TWO_Q27) {
88 		xs >>= 1;
89 		n++;
90 	}
91 
92 	/* exp_small_fixed() input is Q3.29, while x1 is Q5.27
93 	 * exp_small_fixed() output is Q9.23, while z is Q12.20
94 	 */
95 	z = Q_SHIFT_RND(exp_small_fixed(Q_SHIFT_LEFT(xs, 27, 29)), 23, 20);
96 	y = ONE_Q20;
97 	for (i = 0; i < (1 << n); i++)
98 		y = (int32_t)Q_MULTSR_32X32((int64_t)y, z, 20, 20, 20);
99 
100 	return y;
101 }
102 
db2lin_fixed(int32_t db)103 static int32_t db2lin_fixed(int32_t db)
104 {
105 	int32_t arg;
106 
107 	if (db < Q_CONVERT_FLOAT(-100.0, 24))
108 		return 0;
109 
110 	/* Q8.24 x Q5.27, result needs to be Q5.27 */
111 	arg = (int32_t)Q_MULTSR_32X32((int64_t)db, LOG10_DIV20_Q27, 24, 27, 27);
112 	return exp_fixed(arg);
113 }
114 
dai_dmic_update_bits(const struct dai_intel_dmic * dmic,uint32_t reg,uint32_t mask,uint32_t val)115 static void dai_dmic_update_bits(const struct dai_intel_dmic *dmic,
116 				 uint32_t reg, uint32_t mask, uint32_t val)
117 {
118 	uint32_t dest = dmic->reg_base + reg;
119 
120 	sys_write32((sys_read32(dest) & (~mask)) | (val & mask), dest);
121 }
122 
dai_dmic_write(const struct dai_intel_dmic * dmic,uint32_t reg,uint32_t val)123 static inline void dai_dmic_write(const struct dai_intel_dmic *dmic,
124 			   uint32_t reg, uint32_t val)
125 {
126 	sys_write32(val, dmic->reg_base + reg);
127 }
128 
dai_dmic_read(const struct dai_intel_dmic * dmic,uint32_t reg)129 static inline uint32_t dai_dmic_read(const struct dai_intel_dmic *dmic,
130 				     uint32_t reg)
131 {
132 	return sys_read32(dmic->reg_base + reg);
133 }
134 
#if CONFIG_DAI_DMIC_HAS_OWNERSHIP
/* Claim DMIC ownership for the DSP by setting the owner-select field
 * in the DMIC link control register of the shim.
 */
static inline void dai_dmic_claim_ownership(const struct dai_intel_dmic *dmic)
{
	/* DMIC Owner Select to DSP */
	sys_write32(sys_read32(dmic->shim_base + DMICLCTL_OFFSET) |
		    FIELD_PREP(DMICLCTL_OSEL, 0x3), dmic->shim_base + DMICLCTL_OFFSET);
}

/* Release DMIC ownership by clearing the owner-select field. */
static inline void dai_dmic_release_ownership(const struct dai_intel_dmic *dmic)
{
	/* DMIC Owner Select back to Host CPU + DSP */
	sys_write32(sys_read32(dmic->shim_base + DMICLCTL_OFFSET) &
			~DMICLCTL_OSEL, dmic->shim_base + DMICLCTL_OFFSET);
}

#else /* CONFIG_DAI_DMIC_HAS_OWNERSHIP */

/* No ownership register on this platform: both helpers are no-ops. */
static inline void dai_dmic_claim_ownership(const struct dai_intel_dmic *dmic) {}
static inline void dai_dmic_release_ownership(const struct dai_intel_dmic *dmic) {}

#endif /* CONFIG_DAI_DMIC_HAS_OWNERSHIP */
156 
/* Return the base address of the DMIC link control/sync registers:
 * the HDA-ML DMIC block on ACE 2.0 (LNL) / ACE 3.0 (PTL), otherwise
 * the shim.
 */
static inline uint32_t dai_dmic_base(const struct dai_intel_dmic *dmic)
{
#if defined(CONFIG_SOC_INTEL_ACE20_LNL) || defined(CONFIG_SOC_INTEL_ACE30_PTL)
	return dmic->hdamldmic_base;
#else
	return dmic->shim_base;
#endif
}
165 
#if CONFIG_DAI_DMIC_HAS_MULTIPLE_LINE_SYNC
/* Program the DMIC sync period divider derived from the IO clock.
 *
 * @param period Desired sync period; the divider written to SYNCPRD is
 *               CONFIG_DAI_DMIC_HW_IOCLK / period - 1.
 * @param dmic Device data used to locate the sync register block.
 */
static inline void dai_dmic_set_sync_period(uint32_t period, const struct dai_intel_dmic *dmic)
{
	uint32_t val = CONFIG_DAI_DMIC_HW_IOCLK / period - 1;
	uint32_t base = dai_dmic_base(dmic);
	/* DMIC Change sync period */
#if defined(CONFIG_SOC_INTEL_ACE20_LNL) || defined(CONFIG_SOC_INTEL_ACE30_PTL)
	/* On LNL/PTL: write SYNCPRD, set SYNCPU and poll for hardware to
	 * clear it (presumably a period-update handshake — confirm with
	 * the HW spec), then set CMDSYNC.
	 */
	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | FIELD_PREP(DMICSYNC_SYNCPRD, val),
		    base + DMICSYNC_OFFSET);
	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | DMICSYNC_SYNCPU,
		    base + DMICSYNC_OFFSET);

	/* Up to 1000 polls with a 100 us sleep between each */
	if (!WAIT_FOR((sys_read32(base + DMICSYNC_OFFSET) & DMICSYNC_SYNCPU) == 0, 1000,
		      k_sleep(K_USEC(100)))) {
		LOG_ERR("poll timeout");
	}

	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | DMICSYNC_CMDSYNC,
		    base + DMICSYNC_OFFSET);
#else /* All other CAVS and ACE platforms */
	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | FIELD_PREP(DMICSYNC_SYNCPRD, val),
		    base + DMICSYNC_OFFSET);
	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | DMICSYNC_CMDSYNC,
		    base + DMICSYNC_OFFSET);
#endif
}

/* Clear both the sync period divider and the command-sync bit. */
static inline void dai_dmic_clear_sync_period(const struct dai_intel_dmic *dmic)
{
	uint32_t base = dai_dmic_base(dmic);
	/* DMIC Clean sync period */
	sys_write32(sys_read32(base + DMICSYNC_OFFSET) & ~DMICSYNC_SYNCPRD,
			base + DMICSYNC_OFFSET);
	sys_write32(sys_read32(base + DMICSYNC_OFFSET) & ~DMICSYNC_CMDSYNC,
			base + DMICSYNC_OFFSET);
}

/* Preparing for command synchronization on multiple link segments:
 * set CMDSYNC so a later SYNCGO releases the queued command.
 */
static inline void dai_dmic_sync_prepare(const struct dai_intel_dmic *dmic)
{
	uint32_t base = dai_dmic_base(dmic);

	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | DMICSYNC_CMDSYNC,
		    base + DMICSYNC_OFFSET);
}

/* Trigering synchronization of command execution: set SYNCGO and wait
 * for hardware to clear CMDSYNC.
 */
static void dmic_sync_trigger(const struct dai_intel_dmic *dmic)
{
	uint32_t base = dai_dmic_base(dmic);

	/* dai_dmic_sync_prepare() must have set CMDSYNC beforehand */
	__ASSERT_NO_MSG((sys_read32(base + DMICSYNC_OFFSET) & DMICSYNC_CMDSYNC) != 0);

	sys_write32(sys_read32(base + DMICSYNC_OFFSET) |
		    DMICSYNC_SYNCGO, base + DMICSYNC_OFFSET);

	/* waiting for CMDSYNC bit clearing */
	if (!WAIT_FOR((sys_read32(base + DMICSYNC_OFFSET) & DMICSYNC_CMDSYNC) == 0,
		      1000, k_sleep(K_USEC(100)))) {
		LOG_ERR("poll timeout");
	}
}

#else /* CONFIG_DAI_DMIC_HAS_MULTIPLE_LINE_SYNC */

/* No multi-line sync hardware: all sync helpers are no-ops. */
static inline void dai_dmic_set_sync_period(uint32_t period, const struct dai_intel_dmic *dmic) {}
static inline void dai_dmic_clear_sync_period(const struct dai_intel_dmic *dmic) {}
static inline void dai_dmic_sync_prepare(const struct dai_intel_dmic *dmic) {}
static void dmic_sync_trigger(const struct dai_intel_dmic *dmic) {}

#endif /* CONFIG_DAI_DMIC_HAS_MULTIPLE_LINE_SYNC */
237 
dai_dmic_start_fifo_packers(struct dai_intel_dmic * dmic,int fifo_index)238 static void dai_dmic_start_fifo_packers(struct dai_intel_dmic *dmic, int fifo_index)
239 {
240 
241 	/* Start FIFO packers and clear FIFO initialize bits */
242 	dai_dmic_update_bits(dmic, fifo_index * PDM_CHANNEL_REGS_SIZE + OUTCONTROL,
243 			     OUTCONTROL_SIP | OUTCONTROL_FINIT,
244 			     OUTCONTROL_SIP);
245 }
246 
dai_dmic_stop_fifo_packers(struct dai_intel_dmic * dmic,int fifo_index)247 static void dai_dmic_stop_fifo_packers(struct dai_intel_dmic *dmic,
248 					int fifo_index)
249 {
250 	/* Stop FIFO packers and set FIFO initialize bits */
251 	dai_dmic_update_bits(dmic, fifo_index * PDM_CHANNEL_REGS_SIZE + OUTCONTROL,
252 			     OUTCONTROL_SIP | OUTCONTROL_FINIT,
253 			     OUTCONTROL_FINIT);
254 }
255 
256 /* On DMIC IRQ event trace the status register that contains the status and
257  * error bit fields.
258  */
dai_dmic_irq_handler(const void * data)259 static void dai_dmic_irq_handler(const void *data)
260 {
261 	struct dai_intel_dmic *dmic = ((struct device *)data)->data;
262 	uint32_t val0;
263 	uint32_t val1;
264 
265 	/* Trace OUTSTAT0 register */
266 	val0 = dai_dmic_read(dmic, OUTSTAT);
267 	val1 = dai_dmic_read(dmic, OUTSTAT + PDM_CHANNEL_REGS_SIZE);
268 	LOG_DBG("dmic_irq_handler(), OUTSTAT0 = 0x%x, OUTSTAT1 = 0x%x", val0, val1);
269 
270 	if (val0 & OUTSTAT_ROR) {
271 		LOG_ERR("dmic_irq_handler(): full fifo A or PDM overrun");
272 		dai_dmic_write(dmic, OUTSTAT, val0);
273 		dai_dmic_stop_fifo_packers(dmic, 0);
274 	}
275 
276 	if (val1 & OUTSTAT_ROR) {
277 		LOG_ERR("dmic_irq_handler(): full fifo B or PDM overrun");
278 		dai_dmic_write(dmic, OUTSTAT + PDM_CHANNEL_REGS_SIZE, val1);
279 		dai_dmic_stop_fifo_packers(dmic, 1);
280 	}
281 }
282 
/* Disable dynamic clock gating (set DCGD) so DMIC registers remain
 * accessible; the register lives in the vendor shim on LNL/PTL and in
 * the shim elsewhere.
 */
static inline void dai_dmic_dis_clk_gating(const struct dai_intel_dmic *dmic)
{
	/* Disable DMIC clock gating */
#if (CONFIG_SOC_INTEL_ACE20_LNL || CONFIG_SOC_INTEL_ACE30_PTL)
	sys_write32((sys_read32(dmic->vshim_base + DMICLVSCTL_OFFSET) | DMICLVSCTL_DCGD),
		    dmic->vshim_base + DMICLVSCTL_OFFSET);
#else
	sys_write32((sys_read32(dmic->shim_base + DMICLCTL_OFFSET) | DMICLCTL_DCGD),
		    dmic->shim_base + DMICLCTL_OFFSET);
#endif
}

/* Re-enable dynamic clock gating (clear DCGD). */
static inline void dai_dmic_en_clk_gating(const struct dai_intel_dmic *dmic)
{
	/* Enable DMIC clock gating */
#if (CONFIG_SOC_INTEL_ACE20_LNL || CONFIG_SOC_INTEL_ACE30_PTL)
	sys_write32((sys_read32(dmic->vshim_base + DMICLVSCTL_OFFSET) & ~DMICLVSCTL_DCGD),
		    dmic->vshim_base + DMICLVSCTL_OFFSET);
#else /* All other CAVS and ACE platforms */
	sys_write32((sys_read32(dmic->shim_base + DMICLCTL_OFFSET) & ~DMICLCTL_DCGD),
		    dmic->shim_base + DMICLCTL_OFFSET);
#endif

}
307 
/* Program the per-stream PCM channel map register on LNL/PTL; a no-op
 * on all other platforms.
 *
 * @param dmic Device data (provides the shim base address).
 * @param cfg DAI configuration; link_config carries the 16-bit map.
 * @param index FIFO index selecting the per-stream register (4 bytes
 *              apart).
 */
static inline void dai_dmic_program_channel_map(const struct dai_intel_dmic *dmic,
						const struct dai_config *cfg,
						uint32_t index)
{
#if defined(CONFIG_SOC_INTEL_ACE20_LNL) || defined(CONFIG_SOC_INTEL_ACE30_PTL)
	uint16_t pcmsycm = cfg->link_config;
	uint32_t reg_add = dmic->shim_base + DMICXPCMSyCM_OFFSET + 0x0004*index;

	sys_write16(pcmsycm, reg_add);
#else
	ARG_UNUSED(dmic);
	ARG_UNUSED(cfg);
	ARG_UNUSED(index);
#endif /* defined(CONFIG_SOC_INTEL_ACE20_LNL) || defined(CONFIG_SOC_INTEL_ACE30_PTL) */
}
323 
/* Power up the DMIC link: set SPA and, on LNL/PTL, wait until the
 * hardware reports the power-up complete via CPA.
 */
static inline void dai_dmic_en_power(const struct dai_intel_dmic *dmic)
{
	uint32_t base = dai_dmic_base(dmic);
	/* Enable DMIC power */
	sys_write32((sys_read32(base + DMICLCTL_OFFSET) | DMICLCTL_SPA),
			base + DMICLCTL_OFFSET);

#if defined(CONFIG_SOC_INTEL_ACE20_LNL) || defined(CONFIG_SOC_INTEL_ACE30_PTL)
	/* NOTE(review): unbounded poll, unlike the WAIT_FOR() users in
	 * this file — presumably CPA always follows SPA on this HW;
	 * confirm against the platform spec.
	 */
	while (!(sys_read32(base + DMICLCTL_OFFSET) & DMICLCTL_CPA)) {
		k_sleep(K_USEC(100));
	}
#endif
}
337 
dai_dmic_dis_power(const struct dai_intel_dmic * dmic)338 static inline void dai_dmic_dis_power(const struct dai_intel_dmic *dmic)
339 {
340 	uint32_t base = dai_dmic_base(dmic);
341 	/* Disable DMIC power */
342 	sys_write32((sys_read32(base + DMICLCTL_OFFSET) & (~DMICLCTL_SPA)),
343 		     base + DMICLCTL_OFFSET);
344 }
345 
/* First-user hardware bring-up: power, clock gating, sync period,
 * ownership and the interrupt line, in that order. Called with
 * dmic->lock held by dai_dmic_probe_wrapper().
 *
 * @return 0 always.
 */
static int dai_dmic_probe(struct dai_intel_dmic *dmic)
{
	LOG_INF("dmic_probe()");

	/* Set state, note there is no playback direction support */
	dmic->state = DAI_STATE_NOT_READY;

	/* Enable DMIC power */
	dai_dmic_en_power(dmic);

	/* Disable dynamic clock gating for dmic before touching any reg */
	dai_dmic_dis_clk_gating(dmic);

	/* DMIC Change sync period */
	dai_dmic_set_sync_period(CONFIG_DAI_DMIC_PLATFORM_SYNC_PERIOD, dmic);

	/* DMIC Owner Select to DSP */
	dai_dmic_claim_ownership(dmic);

	irq_enable(dmic->irq);

	return 0;
}
369 
/* Last-user teardown: disable the IRQ and, only when no FIFO is active
 * or paused on any DAI, gate the clock, drop power, clear the sync
 * period and release ownership. Called with dmic->lock held by
 * dai_dmic_remove_wrapper().
 *
 * @return 0 always.
 */
static int dai_dmic_remove(struct dai_intel_dmic *dmic)
{
	uint32_t active_fifos_mask = dai_dmic_global.active_fifos_mask;
	uint32_t pause_mask = dai_dmic_global.pause_mask;

	LOG_INF("dmic_remove()");

	irq_disable(dmic->irq);

	LOG_INF("dmic_remove(), dmic_active_fifos_mask = 0x%x, dmic_pause_mask = 0x%x",
		active_fifos_mask, pause_mask);

	/* The next end tasks must be passed if another DAI FIFO still runs.
	 * Note: dai_put() function that calls remove() applies the spinlock
	 * so it is not needed here to protect access to mask bits.
	 */
	if (active_fifos_mask || pause_mask)
		return 0;

	/* Disable DMIC clock and power */
	dai_dmic_en_clk_gating(dmic);
	dai_dmic_dis_power(dmic);

	/* DMIC Clean sync period */
	dai_dmic_clear_sync_period(dmic);

	/* DMIC Owner Select back to Host CPU + DSP */
	dai_dmic_release_ownership(dmic);

	return 0;
}
401 
dai_dmic_timestamp_config(const struct device * dev,struct dai_ts_cfg * cfg)402 static int dai_dmic_timestamp_config(const struct device *dev, struct dai_ts_cfg *cfg)
403 {
404 	cfg->walclk_rate = CONFIG_DAI_DMIC_HW_IOCLK;
405 
406 	return 0;
407 }
408 
/* Arm on-demand timestamping: point CDMAS at the GPDMA channel in use,
 * clear any stale NTK, then request an on-demand timestamp (ODTS).
 *
 * @param dev Unused device handle.
 * @param cfg DMA channel index/count and DMA id used to derive CDMAS.
 * @return 0 always.
 */
static int dai_timestamp_dmic_start(const struct device *dev, struct dai_ts_cfg *cfg)
{
	uint32_t addr = TS_DMIC_LOCAL_TSCTRL;
	uint32_t cdmas;

	/* Set DMIC timestamp registers */

	/* First point CDMAS to GPDMA channel that is used by DMIC
	 * also clear NTK to be sure there is no old timestamp.
	 */
	cdmas = FIELD_PREP(TS_LOCAL_TSCTRL_CDMAS, cfg->dma_chan_index +
		cfg->dma_chan_count * cfg->dma_id);
	sys_write32(TS_LOCAL_TSCTRL_NTK | cdmas, addr);

	/* Request on demand timestamp */
	sys_write32(TS_LOCAL_TSCTRL_ODTS | cdmas, addr);

	return 0;
}
428 
dai_timestamp_dmic_stop(const struct device * dev,struct dai_ts_cfg * cfg)429 static int dai_timestamp_dmic_stop(const struct device *dev, struct dai_ts_cfg *cfg)
430 {
431 	/* Clear NTK and write zero to CDMAS */
432 	sys_write32(TS_LOCAL_TSCTRL_NTK, TS_DMIC_LOCAL_TSCTRL);
433 	return 0;
434 }
435 
/* Fetch the latest hardware timestamp, if one has been captured.
 *
 * @param dev Unused device handle.
 * @param cfg Supplies walclk_rate reported back through tsd.
 * @param tsd Output: wall clock, sample counter and wall clock rate.
 * @return 0 on success, -ENODATA when no new timestamp (NTK clear).
 */
static int dai_timestamp_dmic_get(const struct device *dev, struct dai_ts_cfg *cfg,
				  struct dai_ts_data *tsd)
{
	/* Read DMIC timestamp registers */
	uint32_t tsctrl = TS_DMIC_LOCAL_TSCTRL;
	uint32_t ntk;

	/* Read SSP timestamp registers */
	ntk = sys_read32(tsctrl) & TS_LOCAL_TSCTRL_NTK;
	if (!ntk)
		goto out;

	/* NTK was set, get wall clock */
	tsd->walclk = sys_read64(TS_DMIC_LOCAL_WALCLK);

	/* Sample */
	tsd->sample = sys_read64(TS_DMIC_LOCAL_SAMPLE);

	/* Clear NTK to enable successive timestamps */
	sys_write32(TS_LOCAL_TSCTRL_NTK, tsctrl);

out:
	/* walclk_rate is reported even on the -ENODATA path */
	tsd->walclk_rate = cfg->walclk_rate;
	if (!ntk)
		return -ENODATA;

	return 0;
}
464 
/* Ramp the capture gain up over successive calls (one step per call,
 * driven by DAI_TRIGGER_COPY) until it reaches DMIC_HW_FIR_GAIN_MAX,
 * unmuting the CIC and FIR stages at fixed points in the ramp.
 */
static void dai_dmic_gain_ramp(struct dai_intel_dmic *dmic)
{
	k_spinlock_key_t key;
	int32_t gval;
	uint32_t val;
	int i;

	/* Currently there's no DMIC HW internal mutings and wait times
	 * applied into this start sequence. It can be implemented here if
	 * start of audio capture would contain clicks and/or noise and it
	 * is not suppressed by gain ramp somewhere in the capture pipe.
	 */
	LOG_DBG("DMIC gain ramp");

	/*
	 * At run-time dmic->gain is only changed in this function, and this
	 * function runs in the pipeline task context, so it cannot run
	 * concurrently on multiple cores, since there's always only one
	 * task associated with each DAI, so we don't need to hold the lock to
	 * read the value here.
	 */
	if (dmic->gain == DMIC_HW_FIR_GAIN_MAX << 11)
		return;

	key = k_spin_lock(&dmic->lock);

	/* Increment gain with logarithmic step.
	 * Gain is Q2.30 and gain modifier is Q12.20.
	 */
	dmic->startcount++;
	dmic->gain = q_multsr_sat_32x32(dmic->gain, dmic->gain_coef, Q_SHIFT_GAIN_X_GAIN_COEF);

	/* Gain is stored as Q2.30, while HW register is Q1.19 so shift
	 * the value right by 11.
	 */
	gval = dmic->gain >> 11;

	/* Note that DMIC gain value zero has a special purpose. Value zero
	 * sets gain bypass mode in HW. Zero value will be applied after ramp
	 * is complete. It is because exact 1.0 gain is not possible with Q1.19.
	 */
	if (gval > DMIC_HW_FIR_GAIN_MAX) {
		gval = 0;
		dmic->gain = DMIC_HW_FIR_GAIN_MAX << 11;
	}

	/* Write gain to registers */
	for (i = 0; i < CONFIG_DAI_DMIC_HW_CONTROLLERS; i++) {
		if (!dmic->enable[i])
			continue;

		/* Unmute the CIC stage once the ramp reaches its step count */
		if (dmic->startcount == DMIC_UNMUTE_CIC)
			dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
					     CIC_CONTROL_MIC_MUTE, 0);

		/* Unmute this DAI's FIR stage at its own step count */
		if (dmic->startcount == DMIC_UNMUTE_FIR) {
			dai_dmic_update_bits(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
					     dmic->dai_config_params.dai_index + FIR_CONTROL,
					     FIR_CONTROL_MUTE, 0);
		}

		if (gval != 0) {
			/* Mid-ramp: apply the current ramp gain to both channels */
			val = FIELD_PREP(OUT_GAIN, gval);
			dai_dmic_write(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				       dmic->dai_config_params.dai_index + OUT_GAIN_LEFT, val);
			dai_dmic_write(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				       dmic->dai_config_params.dai_index + OUT_GAIN_RIGHT, val);
		} else {
			/* Ramp complete: apply the configured static gains */
			dai_dmic_write(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				       dmic->dai_config_params.dai_index + OUT_GAIN_LEFT,
				       dmic->gain_left);
			dai_dmic_write(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				       dmic->dai_config_params.dai_index + OUT_GAIN_RIGHT,
				       dmic->gain_right);
		}
	}

	k_spin_unlock(&dmic->lock, key);
}
545 
/* Start capture on this DAI's FIFO: reset the gain ramp state, start
 * the FIFO packers, enable CIC/PDM paths for the configured mics and
 * start this DAI's FIR path, then mark the FIFO active and trigger
 * multi-link sync.
 */
static void dai_dmic_start(struct dai_intel_dmic *dmic)
{
	k_spinlock_key_t key;
	int i;
	int mic_a;
	int mic_b;
	int start_fir;

	/* enable port */
	key = k_spin_lock(&dmic->lock);

#ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
	/* On ACE, clear the PDM soft reset up front */
	for (i = 0; i < CONFIG_DAI_DMIC_HW_CONTROLLERS; i++)
		dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL, CIC_CONTROL_SOFT_RESET, 0);
#endif

	dmic->startcount = 0;

	/* Compute unmute ramp gain update coefficient. */
	dmic->gain_coef = db2lin_fixed(LOGRAMP_CONST_TERM / dmic->unmute_time_ms);

	/* Initial gain value, convert Q12.20 to Q2.30 */
	dmic->gain = Q_SHIFT_LEFT(db2lin_fixed(LOGRAMP_START_DB), 20, 30);

	dai_dmic_sync_prepare(dmic);

	dai_dmic_start_fifo_packers(dmic, dmic->dai_config_params.dai_index);

	for (i = 0; i < CONFIG_DAI_DMIC_HW_CONTROLLERS; i++) {
		/* enable[i] bit 0 = mic A, bit 1 = mic B */
		mic_a = dmic->enable[i] & 1;
		mic_b = (dmic->enable[i] & 2) >> 1;
		start_fir = dmic->enable[i] > 0;

		/* If both microphones are needed start them simultaneously
		 * to start them in sync. The reset may be cleared for another
		 * FIFO already. If only one mic, start them independently.
		 * This makes sure we do not clear start/en for another DAI.
		 */
		if (mic_a && mic_b) {
			dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
					     CIC_CONTROL_CIC_START_A |
					     CIC_CONTROL_CIC_START_B,
					     FIELD_PREP(CIC_CONTROL_CIC_START_A, 1) |
					     FIELD_PREP(CIC_CONTROL_CIC_START_B, 1));
			dai_dmic_update_bits(dmic, dmic_base[i] + MIC_CONTROL,
					     MIC_CONTROL_PDM_EN_A |
					     MIC_CONTROL_PDM_EN_B,
					     FIELD_PREP(MIC_CONTROL_PDM_EN_A, 1) |
					     FIELD_PREP(MIC_CONTROL_PDM_EN_B, 1));
		} else if (mic_a) {
			dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
					     CIC_CONTROL_CIC_START_A,
					     FIELD_PREP(CIC_CONTROL_CIC_START_A, 1));
			dai_dmic_update_bits(dmic, dmic_base[i] + MIC_CONTROL,
					     MIC_CONTROL_PDM_EN_A,
					     FIELD_PREP(MIC_CONTROL_PDM_EN_A, 1));
		} else if (mic_b) {
			dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
					     CIC_CONTROL_CIC_START_B,
					     FIELD_PREP(CIC_CONTROL_CIC_START_B, 1));
			dai_dmic_update_bits(dmic, dmic_base[i] + MIC_CONTROL,
					     MIC_CONTROL_PDM_EN_B,
					     FIELD_PREP(MIC_CONTROL_PDM_EN_B, 1));
		}

		dai_dmic_update_bits(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				     dmic->dai_config_params.dai_index + FIR_CONTROL,
				     FIR_CONTROL_START,
				     FIELD_PREP(FIR_CONTROL_START, start_fir));
	}

#ifndef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
	/* Clear soft reset for all/used PDM controllers. This should
	 * start capture in sync.
	 */
	for (i = 0; i < CONFIG_DAI_DMIC_HW_CONTROLLERS; i++) {
		dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
				     CIC_CONTROL_SOFT_RESET, 0);

		LOG_INF("dmic_start(), cic 0x%08x",
			dai_dmic_read(dmic, dmic_base[i] + CIC_CONTROL));
	}
#endif

	/* Set bit dai->index */
	dai_dmic_global.active_fifos_mask |= BIT(dmic->dai_config_params.dai_index);
	dai_dmic_global.pause_mask &= ~BIT(dmic->dai_config_params.dai_index);

	dmic->state = DAI_STATE_RUNNING;
	k_spin_unlock(&dmic->lock, key);

	/* Outside the lock: release the synchronized start command */
	dmic_sync_trigger(dmic);

	LOG_INF("dmic_start(), dmic_active_fifos_mask = 0x%x",
		dai_dmic_global.active_fifos_mask);
}
642 
/* Stop capture on this DAI's FIFO. The FIFO is marked inactive (or
 * paused when stop_is_pause); the CIC stage is soft-reset and muted
 * only when no other FIFO remains active, while this DAI's FIR path is
 * always muted.
 */
static void dai_dmic_stop(struct dai_intel_dmic *dmic, bool stop_is_pause)
{
	k_spinlock_key_t key;
	int i;

	LOG_DBG("dmic_stop()");
	key = k_spin_lock(&dmic->lock);

	dai_dmic_stop_fifo_packers(dmic, dmic->dai_config_params.dai_index);

	/* Set soft reset and mute on for all PDM controllers. */
	LOG_INF("dmic_stop(), dmic_active_fifos_mask = 0x%x",
			dai_dmic_global.active_fifos_mask);

	/* Clear bit dmic->dai_config_params.dai_index for active FIFO.
	 * If stop for pause, set pause mask bit.
	 * If stop is not for pausing, it is safe to clear the pause bit.
	 */
	dai_dmic_global.active_fifos_mask &= ~BIT(dmic->dai_config_params.dai_index);
	if (stop_is_pause)
		dai_dmic_global.pause_mask |= BIT(dmic->dai_config_params.dai_index);
	else
		dai_dmic_global.pause_mask &= ~BIT(dmic->dai_config_params.dai_index);

	for (i = 0; i < CONFIG_DAI_DMIC_HW_CONTROLLERS; i++) {
		/* Don't stop CIC yet if one FIFO remains active */
		if (dai_dmic_global.active_fifos_mask == 0) {
			dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
					     CIC_CONTROL_SOFT_RESET |
					     CIC_CONTROL_MIC_MUTE,
					     CIC_CONTROL_SOFT_RESET |
					     CIC_CONTROL_MIC_MUTE);
		}
		dai_dmic_update_bits(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				     dmic->dai_config_params.dai_index + FIR_CONTROL,
				     FIR_CONTROL_MUTE,
				     FIR_CONTROL_MUTE);
	}

	k_spin_unlock(&dmic->lock, key);
}
684 
/* Return DMA-related properties of the DAI's capture FIFO.
 *
 * dir and stream_id are unused: there is a single capture stream.
 * NOTE(review): writes through dev->config with the const cast away;
 * the config object is the per-instance dai_intel_dmic_properties_##n
 * storage defined by DAI_INTEL_DMIC_DEVICE_INIT(), which is mutable.
 */
const struct dai_properties *dai_dmic_get_properties(const struct device *dev,
						     enum dai_dir dir,
						     int stream_id)
{
	const struct dai_intel_dmic *dmic = (const struct dai_intel_dmic *)dev->data;
	struct dai_properties *prop = (struct dai_properties *)dev->config;

	prop->fifo_address = dmic->fifo.offset;
	prop->fifo_depth = dmic->fifo.depth;
	prop->dma_hs_id = dmic->fifo.handshake;
	prop->reg_init_delay = 0;

	return prop;
}
699 
/* DAI trigger entry point: start/stop/pause capture or step the gain
 * ramp (COPY). Only the RX direction is supported.
 *
 * @return 0 on success (including ignored commands), -EINVAL when dir
 *         is not DAI_DIR_RX.
 */
static int dai_dmic_trigger(const struct device *dev, enum dai_dir dir,
			    enum dai_trigger_cmd cmd)
{
	struct dai_intel_dmic *dmic = (struct dai_intel_dmic *)dev->data;

	LOG_DBG("dmic_trigger()");

	if (dir != DAI_DIR_RX) {
		LOG_ERR("dmic_trigger(): direction != DAI_DIR_RX");
		return -EINVAL;
	}

	switch (cmd) {
	case DAI_TRIGGER_START:
		/* Start only from the configured or paused state; a start
		 * from any other state is logged but not treated as error.
		 */
		if (dmic->state == DAI_STATE_PAUSED ||
		    dmic->state == DAI_STATE_PRE_RUNNING) {
			dai_dmic_start(dmic);
			dmic->state = DAI_STATE_RUNNING;
		} else {
			LOG_ERR("dmic_trigger(): state is not prepare or paused, dmic->state = %u",
				dmic->state);
		}
		break;
	case DAI_TRIGGER_STOP:
		dai_dmic_stop(dmic, false);
		dmic->state = DAI_STATE_PRE_RUNNING;
		break;
	case DAI_TRIGGER_PAUSE:
		dai_dmic_stop(dmic, true);
		dmic->state = DAI_STATE_PAUSED;
		break;
	case DAI_TRIGGER_COPY:
		/* Called per copied period to advance the unmute gain ramp */
		dai_dmic_gain_ramp(dmic);
		break;
	default:
		break;
	}

	return 0;
}
740 
dai_dmic_get_config(const struct device * dev,struct dai_config * cfg,enum dai_dir dir)741 static int dai_dmic_get_config(const struct device *dev, struct dai_config *cfg, enum dai_dir dir)
742 {
743 	struct dai_intel_dmic *dmic = (struct dai_intel_dmic *)dev->data;
744 
745 	if (dir != DAI_DIR_RX) {
746 		return -EINVAL;
747 	}
748 
749 	if (!cfg) {
750 		return -EINVAL;
751 	}
752 
753 	*cfg = dmic->dai_config_params;
754 
755 	return 0;
756 }
757 
/* Apply a bespoke (NHLT blob) configuration to the DAI.
 *
 * @param dev Device whose data holds the DMIC instance.
 * @param cfg Generic DAI config; used for the channel map on LNL/PTL.
 * @param bespoke_cfg Platform blob (NHLT); must be non-NULL.
 * @return 0 on success, -EINVAL for bad index/NULL blob, or the
 *         negative error from the NHLT parser.
 */
static int dai_dmic_set_config(const struct device *dev,
		const struct dai_config *cfg, const void *bespoke_cfg)

{
	struct dai_intel_dmic *dmic = (struct dai_intel_dmic *)dev->data;
	int ret = 0;
	int di = dmic->dai_config_params.dai_index;
	k_spinlock_key_t key;

	LOG_INF("dmic_set_config()");

	if (di >= CONFIG_DAI_DMIC_HW_FIFOS) {
		LOG_ERR("dmic_set_config(): DAI index exceeds number of FIFOs");
		return -EINVAL;
	}

	if (!bespoke_cfg) {
		LOG_ERR("dmic_set_config(): NULL config");
		return -EINVAL;
	}

	dai_dmic_program_channel_map(dmic, cfg, di);

	key = k_spin_lock(&dmic->lock);

#if CONFIG_DAI_INTEL_DMIC_TPLG_PARAMS
#error DMIC TPLG is not yet implemented

#elif CONFIG_DAI_INTEL_DMIC_NHLT
	ret = dai_dmic_set_config_nhlt(dmic, bespoke_cfg);

	/* There's no unmute ramp duration in blob, so the default rate dependent is used. */
	dmic->unmute_time_ms = dmic_get_unmute_ramp_from_samplerate(dmic->dai_config_params.rate);
#else
#error No DMIC config selected
#endif

	if (ret < 0) {
		LOG_ERR("dmic_set_config(): Failed to set the requested configuration.");
		goto out;
	}

	dmic->state = DAI_STATE_PRE_RUNNING;

out:
	k_spin_unlock(&dmic->lock, key);
	return ret;
}
806 
dai_dmic_probe_wrapper(const struct device * dev)807 static int dai_dmic_probe_wrapper(const struct device *dev)
808 {
809 	struct dai_intel_dmic *dmic = (struct dai_intel_dmic *)dev->data;
810 	k_spinlock_key_t key;
811 	int ret = 0;
812 
813 	key = k_spin_lock(&dmic->lock);
814 
815 	if (dmic->sref == 0) {
816 		ret = dai_dmic_probe(dmic);
817 	}
818 
819 	if (!ret) {
820 		dmic->sref++;
821 	}
822 
823 	k_spin_unlock(&dmic->lock, key);
824 
825 	return ret;
826 }
827 
/* Reference-counted remove: tear down the hardware when the last
 * reference is dropped.
 *
 * NOTE(review): sref is decremented unconditionally, so an unbalanced
 * put would drive it negative — assumed balanced by the PM runtime
 * framework; confirm callers always pair get/put.
 *
 * @return 0, or the error from dai_dmic_remove() on the last put.
 */
static int dai_dmic_remove_wrapper(const struct device *dev)
{
	struct dai_intel_dmic *dmic = (struct dai_intel_dmic *)dev->data;
	k_spinlock_key_t key;
	int ret = 0;

	key = k_spin_lock(&dmic->lock);

	if (--dmic->sref == 0) {
		ret = dai_dmic_remove(dmic);
	}

	k_spin_unlock(&dmic->lock, key);

	return ret;
}
844 
dmic_pm_action(const struct device * dev,enum pm_device_action action)845 static int dmic_pm_action(const struct device *dev, enum pm_device_action action)
846 {
847 	switch (action) {
848 	case PM_DEVICE_ACTION_SUSPEND:
849 		dai_dmic_remove_wrapper(dev);
850 		break;
851 	case PM_DEVICE_ACTION_RESUME:
852 		dai_dmic_probe_wrapper(dev);
853 		break;
854 	case PM_DEVICE_ACTION_TURN_OFF:
855 	case PM_DEVICE_ACTION_TURN_ON:
856 		/* All device pm is handled during resume and suspend */
857 		break;
858 	default:
859 		return -ENOTSUP;
860 	}
861 
862 	return 0;
863 }
864 
/* DAI driver API vtable. probe/remove map directly to PM runtime
 * get/put, so hardware bring-up/teardown is driven by dmic_pm_action()
 * on the first/last user.
 */
const struct dai_driver_api dai_dmic_ops = {
	.probe			= pm_device_runtime_get,
	.remove			= pm_device_runtime_put,
	.config_set		= dai_dmic_set_config,
	.config_get		= dai_dmic_get_config,
	.get_properties		= dai_dmic_get_properties,
	.trigger		= dai_dmic_trigger,
	.ts_config		= dai_dmic_timestamp_config,
	.ts_start		= dai_timestamp_dmic_start,
	.ts_stop		= dai_timestamp_dmic_stop,
	.ts_get			= dai_timestamp_dmic_get
};
877 
dai_dmic_initialize_device(const struct device * dev)878 static int dai_dmic_initialize_device(const struct device *dev)
879 {
880 	IRQ_CONNECT(
881 		DT_INST_IRQN(0),
882 		IRQ_DEFAULT_PRIORITY,
883 		dai_dmic_irq_handler,
884 		DEVICE_DT_INST_GET(0),
885 		0);
886 	if (pm_device_on_power_domain(dev)) {
887 		pm_device_init_off(dev);
888 	} else {
889 		pm_device_init_suspended(dev);
890 	}
891 
892 	return pm_device_runtime_enable(dev);
893 };
894 
895 
/* Per-instance device definition: mutable properties storage (filled
 * by dai_dmic_get_properties()), driver data from devicetree (register
 * base, shim, optional HDA-ML and vendor-shim bases, IRQ, FIFO offset
 * and DMA handshake), the PM device and the device object itself.
 */
#define DAI_INTEL_DMIC_DEVICE_INIT(n)					\
	static struct dai_properties dai_intel_dmic_properties_##n;	\
									\
	static struct dai_intel_dmic dai_intel_dmic_data_##n =		\
	{	.dai_config_params =					\
		{							\
			.type = DAI_INTEL_DMIC,				\
			.dai_index = n					\
		},							\
		.reg_base = DT_INST_REG_ADDR_BY_IDX(n, 0),		\
		.shim_base = DT_INST_PROP(n, shim),			\
		IF_ENABLED(DT_NODE_EXISTS(DT_NODELABEL(hdamlddmic)),	\
			(.hdamldmic_base = DT_REG_ADDR(DT_NODELABEL(hdamlddmic)),))	\
		IF_ENABLED(DT_NODE_EXISTS(DT_NODELABEL(dmicvss)),	\
			(.vshim_base = DT_REG_ADDR(DT_NODELABEL(dmicvss)),))	\
		.irq = DT_INST_IRQN(n),					\
		.fifo =							\
		{							\
			.offset = DT_INST_REG_ADDR_BY_IDX(n, 0)		\
				+ DT_INST_PROP(n, fifo),		\
			.handshake = DMA_HANDSHAKE_DMIC_CH##n		\
		},							\
	};								\
									\
	PM_DEVICE_DT_INST_DEFINE(n, dmic_pm_action);			\
									\
	DEVICE_DT_INST_DEFINE(n,					\
		dai_dmic_initialize_device,				\
		PM_DEVICE_DT_INST_GET(n),				\
		&dai_intel_dmic_data_##n,				\
		&dai_intel_dmic_properties_##n,				\
		POST_KERNEL,						\
		CONFIG_DAI_INIT_PRIORITY,				\
		&dai_dmic_ops);

/* Instantiate one driver per enabled devicetree node */
DT_INST_FOREACH_STATUS_OKAY(DAI_INTEL_DMIC_DEVICE_INIT)
932