1 /*
2  * Copyright (c) 2022 Intel Corporation.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DT_DRV_COMPAT intel_dai_dmic
8 #define LOG_DOMAIN dai_intel_dmic
9 #include <zephyr/logging/log.h>
10 LOG_MODULE_REGISTER(LOG_DOMAIN);
11 
12 #include <errno.h>
13 #include <stdbool.h>
14 #include <stdint.h>
15 #include <zephyr/kernel.h>
16 #include <zephyr/spinlock.h>
17 #include <zephyr/devicetree.h>
18 #include <zephyr/pm/device.h>
19 #include <zephyr/pm/device_runtime.h>
20 
21 #include <zephyr/drivers/dai.h>
22 #include <zephyr/irq.h>
23 
24 #include "dmic.h"
25 #include <dmic_regs.h>
26 
27 /* Base addresses (in PDM scope) of 2ch PDM controllers and coefficient RAM. */
28 static const uint32_t dmic_base[4] = {PDM0, PDM1, PDM2, PDM3};
29 
30 /* global data shared between all dmic instances */
31 struct dai_dmic_global_shared dai_dmic_global;
32 
33 /* Helper macro to read 64-bit data using two 32-bit data read */
/* Note: this macro evaluates @addr twice, so pass only side-effect-free
 * expressions. The argument is parenthesized to be safe for compound
 * address expressions.
 */
#define sys_read64(addr)    (((uint64_t)(sys_read32((addr) + 4)) << 32) | \
			     sys_read32(addr))
36 
37 int dai_dmic_set_config_nhlt(struct dai_intel_dmic *dmic, const void *spec_config);
38 
39 /* Exponent function for small values of x. This function calculates
40  * fairly accurately exponent for x in range -2.0 .. +2.0. The iteration
41  * uses first 11 terms of Taylor series approximation for exponent
42  * function. With the current scaling the numerator just remains under
43  * 64 bits with the 11 terms.
44  *
45  * See https://en.wikipedia.org/wiki/Exponential_function#Computation
46  *
47  * The input is Q3.29
48  * The output is Q9.23
49  */
static int32_t exp_small_fixed(int32_t x)
{
	int64_t p;
	/* First Taylor term: x converted from Q3.29 input to Q9.23 */
	int64_t num = Q_SHIFT_RND(x, 29, 23);
	int32_t y = (int32_t)num;
	int32_t den = 1;
	int32_t inc;
	int k;

	/* Numerator is x^k, denominator is k! */
	for (k = 2; k < 12; k++) {
		p = num * x; /* Q9.23 x Q3.29 -> Q12.52 */
		num = Q_SHIFT_RND(p, 52, 23);
		den = den * k; /* k! fits int32 for k <= 11 (11! ~ 4e7) */
		inc = (int32_t)(num / den);
		y += inc;
	}

	/* Add the series' constant 1.0 term in Q9.23 */
	return y + ONE_Q23;
}
70 
/* Fixed point exponent for an extended input range. The argument is
 * halved until it fits the accurate -2.0 .. +2.0 range of
 * exp_small_fixed(), then the result is raised back with repeated
 * multiplication since exp(x) = exp(x/2^n)^(2^n).
 *
 * The input is Q5.27, the output is Q12.20 (saturated to 0 / INT32_MAX
 * outside the representable range).
 */
static int32_t exp_fixed(int32_t x)
{
	int32_t xs;
	int32_t y;
	int32_t z;
	int i;
	int n = 0;

	/* Underflow: result would round to zero in the output format */
	if (x < Q_CONVERT_FLOAT(-11.5, 27)) {
		return 0;
	}

	/* Overflow: result would exceed the Q12.20 output range */
	if (x > Q_CONVERT_FLOAT(7.6245, 27)) {
		return INT32_MAX;
	}

	/* x is Q5.27 */
	xs = x;
	/* Halve xs until it is within -2.0 .. +2.0, counting halvings in n */
	while (xs >= TWO_Q27 || xs <= MINUS_TWO_Q27) {
		xs >>= 1;
		n++;
	}

	/* exp_small_fixed() input is Q3.29, while x1 is Q5.27
	 * exp_small_fixed() output is Q9.23, while z is Q12.20
	 */
	z = Q_SHIFT_RND(exp_small_fixed(Q_SHIFT_LEFT(xs, 27, 29)), 23, 20);
	y = ONE_Q20;
	/* Reconstruct exp(x) = z^(2^n) with 2^n Q20 multiplications */
	for (i = 0; i < (1 << n); i++) {
		y = (int32_t)Q_MULTSR_32X32((int64_t)y, z, 20, 20, 20);
	}

	return y;
}
105 
/* Convert a decibels value (Q8.24) to a linear gain via exp_fixed(),
 * i.e. lin = exp(db * LOG10_DIV20_Q27), presumably 10^(db/20) given the
 * constant's name — output format follows exp_fixed() (Q12.20).
 */
static int32_t db2lin_fixed(int32_t db)
{
	int32_t arg;

	/* Below -100 dB treat the gain as zero */
	if (db < Q_CONVERT_FLOAT(-100.0, 24)) {
		return 0;
	}

	/* Q8.24 x Q5.27, result needs to be Q5.27 */
	arg = (int32_t)Q_MULTSR_32X32((int64_t)db, LOG10_DIV20_Q27, 24, 27, 27);
	return exp_fixed(arg);
}
118 
dai_dmic_update_bits(const struct dai_intel_dmic * dmic,uint32_t reg,uint32_t mask,uint32_t val)119 static void dai_dmic_update_bits(const struct dai_intel_dmic *dmic,
120 				 uint32_t reg, uint32_t mask, uint32_t val)
121 {
122 	uint32_t dest = dmic->reg_base + reg;
123 
124 	sys_write32((sys_read32(dest) & (~mask)) | (val & mask), dest);
125 }
126 
dai_dmic_write(const struct dai_intel_dmic * dmic,uint32_t reg,uint32_t val)127 static inline void dai_dmic_write(const struct dai_intel_dmic *dmic,
128 			   uint32_t reg, uint32_t val)
129 {
130 	sys_write32(val, dmic->reg_base + reg);
131 }
132 
dai_dmic_read(const struct dai_intel_dmic * dmic,uint32_t reg)133 static inline uint32_t dai_dmic_read(const struct dai_intel_dmic *dmic,
134 				     uint32_t reg)
135 {
136 	return sys_read32(dmic->reg_base + reg);
137 }
138 
139 #if CONFIG_DAI_DMIC_HAS_OWNERSHIP
/* Claim DMIC ownership for the DSP by setting the owner-select field
 * (OSEL = 0x3) in the shim's DMICLCTL register.
 */
static inline void dai_dmic_claim_ownership(const struct dai_intel_dmic *dmic)
{
	/* DMIC Owner Select to DSP */
	sys_write32(sys_read32(dmic->shim_base + DMICLCTL_OFFSET) |
		    FIELD_PREP(DMICLCTL_OSEL, 0x3), dmic->shim_base + DMICLCTL_OFFSET);
}
146 
/* Release DMIC ownership by clearing the owner-select field, returning
 * control to Host CPU + DSP.
 */
static inline void dai_dmic_release_ownership(const struct dai_intel_dmic *dmic)
{
	/* DMIC Owner Select back to Host CPU + DSP */
	sys_write32(sys_read32(dmic->shim_base + DMICLCTL_OFFSET) &
			~DMICLCTL_OSEL, dmic->shim_base + DMICLCTL_OFFSET);
}
153 
154 #else /* CONFIG_DAI_DMIC_HAS_OWNERSHIP */
155 
/* Ownership control is not present on this platform: no-op stubs */
static inline void dai_dmic_claim_ownership(const struct dai_intel_dmic *dmic) {}
static inline void dai_dmic_release_ownership(const struct dai_intel_dmic *dmic) {}
158 
159 #endif /* CONFIG_DAI_DMIC_HAS_OWNERSHIP */
160 
/* Return the base address for DMIC link control/sync registers: the
 * HDA-ML DMIC block on ACE 2.0 (LNL) / ACE 3.0, the DMIC shim otherwise.
 */
static inline uint32_t dai_dmic_base(const struct dai_intel_dmic *dmic)
{
#if defined(CONFIG_SOC_INTEL_ACE20_LNL) || defined(CONFIG_SOC_INTEL_ACE30)
	return dmic->hdamldmic_base;
#else
	return dmic->shim_base;
#endif
}
169 
170 #if CONFIG_DAI_DMIC_HAS_MULTIPLE_LINE_SYNC
/* Program the DMIC sync period derived from the IO clock and @period,
 * then arm command synchronization (CMDSYNC). On ACE 2.0/3.0 the new
 * period must additionally be latched via SYNCPU, and hardware clears
 * SYNCPU when the update has taken effect.
 */
static inline void dai_dmic_set_sync_period(uint32_t period, const struct dai_intel_dmic *dmic)
{
	/* Period register value is in IO clock cycles, programmed as N-1 */
	uint32_t val = CONFIG_DAI_DMIC_HW_IOCLK / period - 1;
	uint32_t base = dai_dmic_base(dmic);
	/* DMIC Change sync period */
#if defined(CONFIG_SOC_INTEL_ACE20_LNL) || defined(CONFIG_SOC_INTEL_ACE30)
	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | FIELD_PREP(DMICSYNC_SYNCPRD, val),
		    base + DMICSYNC_OFFSET);
	/* Latch the new period; HW clears SYNCPU when done */
	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | DMICSYNC_SYNCPU,
		    base + DMICSYNC_OFFSET);

	if (!WAIT_FOR((sys_read32(base + DMICSYNC_OFFSET) & DMICSYNC_SYNCPU) == 0, 1000,
		      k_sleep(K_USEC(100)))) {
		LOG_ERR("poll timeout");
	}

	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | DMICSYNC_CMDSYNC,
		    base + DMICSYNC_OFFSET);
#else /* All other CAVS and ACE platforms */
	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | FIELD_PREP(DMICSYNC_SYNCPRD, val),
		    base + DMICSYNC_OFFSET);
	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | DMICSYNC_CMDSYNC,
		    base + DMICSYNC_OFFSET);
#endif
}
196 
dai_dmic_clear_sync_period(const struct dai_intel_dmic * dmic)197 static inline void dai_dmic_clear_sync_period(const struct dai_intel_dmic *dmic)
198 {
199 	uint32_t base = dai_dmic_base(dmic);
200 	/* DMIC Clean sync period */
201 	sys_write32(sys_read32(base + DMICSYNC_OFFSET) & ~DMICSYNC_SYNCPRD,
202 			base + DMICSYNC_OFFSET);
203 	sys_write32(sys_read32(base + DMICSYNC_OFFSET) & ~DMICSYNC_CMDSYNC,
204 			base + DMICSYNC_OFFSET);
205 }
206 
207 /* Preparing for command synchronization on multiple link segments */
dai_dmic_sync_prepare(const struct dai_intel_dmic * dmic)208 static inline void dai_dmic_sync_prepare(const struct dai_intel_dmic *dmic)
209 {
210 	uint32_t base = dai_dmic_base(dmic);
211 
212 	sys_write32(sys_read32(base + DMICSYNC_OFFSET) | DMICSYNC_CMDSYNC,
213 		    base + DMICSYNC_OFFSET);
214 }
215 
/* Triggering synchronization of command execution */
/* Trigger synchronized command execution: requires CMDSYNC to have been
 * armed by dai_dmic_sync_prepare(); sets SYNCGO and waits for hardware
 * to clear CMDSYNC as confirmation.
 */
static void dmic_sync_trigger(const struct dai_intel_dmic *dmic)
{
	uint32_t base = dai_dmic_base(dmic);

	/* Caller must have armed CMDSYNC first */
	__ASSERT_NO_MSG((sys_read32(base + DMICSYNC_OFFSET) & DMICSYNC_CMDSYNC) != 0);

	sys_write32(sys_read32(base + DMICSYNC_OFFSET) |
		    DMICSYNC_SYNCGO, base + DMICSYNC_OFFSET);

	/* waiting for CMDSYNC bit clearing */
	if (!WAIT_FOR((sys_read32(base + DMICSYNC_OFFSET) & DMICSYNC_CMDSYNC) == 0,
		      1000, k_sleep(K_USEC(100)))) {
		LOG_ERR("poll timeout");
	}
}
232 
233 #else /* CONFIG_DAI_DMIC_HAS_MULTIPLE_LINE_SYNC */
234 
/* Multiple line sync is not present on this platform: no-op stubs */
static inline void dai_dmic_set_sync_period(uint32_t period, const struct dai_intel_dmic *dmic) {}
static inline void dai_dmic_clear_sync_period(const struct dai_intel_dmic *dmic) {}
static inline void dai_dmic_sync_prepare(const struct dai_intel_dmic *dmic) {}
static void dmic_sync_trigger(const struct dai_intel_dmic *dmic) {}
239 
240 #endif /* CONFIG_DAI_DMIC_HAS_MULTIPLE_LINE_SYNC */
241 
dai_dmic_start_fifo_packers(struct dai_intel_dmic * dmic,int fifo_index)242 static void dai_dmic_start_fifo_packers(struct dai_intel_dmic *dmic, int fifo_index)
243 {
244 
245 	/* Start FIFO packers and clear FIFO initialize bits */
246 	dai_dmic_update_bits(dmic, fifo_index * PDM_CHANNEL_REGS_SIZE + OUTCONTROL,
247 			     OUTCONTROL_SIP | OUTCONTROL_FINIT,
248 			     OUTCONTROL_SIP);
249 }
250 
dai_dmic_stop_fifo_packers(struct dai_intel_dmic * dmic,int fifo_index)251 static void dai_dmic_stop_fifo_packers(struct dai_intel_dmic *dmic,
252 					int fifo_index)
253 {
254 	/* Stop FIFO packers and set FIFO initialize bits */
255 	dai_dmic_update_bits(dmic, fifo_index * PDM_CHANNEL_REGS_SIZE + OUTCONTROL,
256 			     OUTCONTROL_SIP | OUTCONTROL_FINIT,
257 			     OUTCONTROL_FINIT);
258 }
259 
260 /* On DMIC IRQ event trace the status register that contains the status and
261  * error bit fields.
262  */
/* IRQ handler: trace both FIFO status registers and, on receive overrun
 * (ROR), write the status back and stop the affected FIFO packer.
 */
static void dai_dmic_irq_handler(const void *data)
{
	/* @data is the device pointer passed at IRQ_CONNECT time */
	struct dai_intel_dmic *dmic = ((struct device *)data)->data;
	uint32_t val0;
	uint32_t val1;

	/* Trace OUTSTAT0 register */
	val0 = dai_dmic_read(dmic, OUTSTAT);
	val1 = dai_dmic_read(dmic, OUTSTAT + PDM_CHANNEL_REGS_SIZE);
	LOG_DBG("dmic_irq_handler(), OUTSTAT0 = 0x%x, OUTSTAT1 = 0x%x", val0, val1);

	/* Writing the read status value back presumably clears the sticky
	 * error bits (write-1-to-clear) -- confirm against register spec.
	 */
	if (val0 & OUTSTAT_ROR) {
		LOG_ERR("dmic_irq_handler(): full fifo A or PDM overrun");
		dai_dmic_write(dmic, OUTSTAT, val0);
		dai_dmic_stop_fifo_packers(dmic, 0);
	}

	if (val1 & OUTSTAT_ROR) {
		LOG_ERR("dmic_irq_handler(): full fifo B or PDM overrun");
		dai_dmic_write(dmic, OUTSTAT + PDM_CHANNEL_REGS_SIZE, val1);
		dai_dmic_stop_fifo_packers(dmic, 1);
	}
}
286 
/* Disable dynamic clock gating by setting the DCGD bit; the register
 * lives in the vendor-specific shim on ACE 2.0/3.0, the DMIC shim
 * elsewhere.
 */
static inline void dai_dmic_dis_clk_gating(const struct dai_intel_dmic *dmic)
{
	/* Disable DMIC clock gating */
#if (CONFIG_SOC_INTEL_ACE20_LNL || CONFIG_SOC_INTEL_ACE30)
	sys_write32((sys_read32(dmic->vshim_base + DMICLVSCTL_OFFSET) | DMICLVSCTL_DCGD),
		    dmic->vshim_base + DMICLVSCTL_OFFSET);
#else
	sys_write32((sys_read32(dmic->shim_base + DMICLCTL_OFFSET) | DMICLCTL_DCGD),
		    dmic->shim_base + DMICLCTL_OFFSET);
#endif
}
298 
/* Re-enable dynamic clock gating by clearing the DCGD bit (counterpart
 * of dai_dmic_dis_clk_gating()).
 */
static inline void dai_dmic_en_clk_gating(const struct dai_intel_dmic *dmic)
{
	/* Enable DMIC clock gating */
#if (CONFIG_SOC_INTEL_ACE20_LNL || CONFIG_SOC_INTEL_ACE30)
	sys_write32((sys_read32(dmic->vshim_base + DMICLVSCTL_OFFSET) & ~DMICLVSCTL_DCGD),
		    dmic->vshim_base + DMICLVSCTL_OFFSET);
#else /* All other CAVS and ACE platforms */
	sys_write32((sys_read32(dmic->shim_base + DMICLCTL_OFFSET) & ~DMICLCTL_DCGD),
		    dmic->shim_base + DMICLCTL_OFFSET);
#endif

}
311 
/* Program the per-FIFO PCM channel map register (ACE 2.0/3.0 only) with
 * the 16-bit link_config value from the DAI config; a no-op elsewhere.
 */
static inline void dai_dmic_program_channel_map(const struct dai_intel_dmic *dmic,
						const struct dai_config *cfg,
						uint32_t index)
{
#if defined(CONFIG_SOC_INTEL_ACE20_LNL) || defined(CONFIG_SOC_INTEL_ACE30)
	uint16_t pcmsycm = cfg->link_config;
	/* Registers are spaced 4 bytes apart per FIFO index */
	uint32_t reg_add = dmic->shim_base + DMICXPCMSyCM_OFFSET + 0x0004*index;

	sys_write16(pcmsycm, reg_add);
#else
	ARG_UNUSED(dmic);
	ARG_UNUSED(cfg);
	ARG_UNUSED(index);
#endif /* defined(CONFIG_SOC_INTEL_ACE20_LNL) || defined(CONFIG_SOC_INTEL_ACE30) */
}
327 
dai_dmic_en_power(const struct dai_intel_dmic * dmic)328 static inline void dai_dmic_en_power(const struct dai_intel_dmic *dmic)
329 {
330 	uint32_t base = dai_dmic_base(dmic);
331 	/* Enable DMIC power */
332 	sys_write32((sys_read32(base + DMICLCTL_OFFSET) | DMICLCTL_SPA),
333 			base + DMICLCTL_OFFSET);
334 
335 #if defined(CONFIG_SOC_INTEL_ACE20_LNL) || defined(CONFIG_SOC_INTEL_ACE30)
336 	while (!(sys_read32(base + DMICLCTL_OFFSET) & DMICLCTL_CPA)) {
337 		k_sleep(K_USEC(100));
338 	}
339 #endif
340 }
341 
dai_dmic_dis_power(const struct dai_intel_dmic * dmic)342 static inline void dai_dmic_dis_power(const struct dai_intel_dmic *dmic)
343 {
344 	uint32_t base = dai_dmic_base(dmic);
345 	/* Disable DMIC power */
346 	sys_write32((sys_read32(base + DMICLCTL_OFFSET) & (~DMICLCTL_SPA)),
347 		     base + DMICLCTL_OFFSET);
348 }
349 
/* Bring the DMIC hardware up for the first user: power on, disable
 * clock gating, program the sync period, claim DSP ownership and
 * enable the IRQ. Called with dmic->lock held by the wrapper.
 * Always returns 0.
 */
static int dai_dmic_probe(struct dai_intel_dmic *dmic)
{
	LOG_INF("dmic_probe()");

	/* Set state, note there is no playback direction support */
	dmic->state = DAI_STATE_NOT_READY;

	/* Enable DMIC power */
	dai_dmic_en_power(dmic);

	/* Disable dynamic clock gating for dmic before touching any reg */
	dai_dmic_dis_clk_gating(dmic);

	/* DMIC Change sync period */
	dai_dmic_set_sync_period(CONFIG_DAI_DMIC_PLATFORM_SYNC_PERIOD, dmic);

	/* DMIC Owner Select to DSP */
	dai_dmic_claim_ownership(dmic);

	irq_enable(dmic->irq);

	return 0;
}
373 
/* Tear the DMIC hardware down when the last user leaves: disable the
 * IRQ, then (only if no FIFO is active or paused on any DAI) re-enable
 * clock gating, power down, clear the sync period and release ownership.
 * Always returns 0.
 */
static int dai_dmic_remove(struct dai_intel_dmic *dmic)
{
	uint32_t active_fifos_mask = dai_dmic_global.active_fifos_mask;
	uint32_t pause_mask = dai_dmic_global.pause_mask;

	LOG_INF("dmic_remove()");

	irq_disable(dmic->irq);

	LOG_INF("dmic_remove(), dmic_active_fifos_mask = 0x%x, dmic_pause_mask = 0x%x",
		active_fifos_mask, pause_mask);

	/* The next end tasks must be passed if another DAI FIFO still runs.
	 * Note: dai_put() function that calls remove() applies the spinlock
	 * so it is not needed here to protect access to mask bits.
	 */
	if (active_fifos_mask || pause_mask) {
		return 0;
	}

	/* Disable DMIC clock and power */
	dai_dmic_en_clk_gating(dmic);
	dai_dmic_dis_power(dmic);

	/* DMIC Clean sync period */
	dai_dmic_clear_sync_period(dmic);

	/* DMIC Owner Select back to Host CPU + DSP */
	dai_dmic_release_ownership(dmic);

	return 0;
}
406 
dai_dmic_timestamp_config(const struct device * dev,struct dai_ts_cfg * cfg)407 static int dai_dmic_timestamp_config(const struct device *dev, struct dai_ts_cfg *cfg)
408 {
409 	cfg->walclk_rate = CONFIG_DAI_DMIC_HW_IOCLK;
410 
411 	return 0;
412 }
413 
/* Arm the DMIC local timestamping logic: point CDMAS at the GPDMA
 * channel used by this DAI, clear any stale NTK flag, then request an
 * on-demand timestamp. Always returns 0; @dev is unused.
 */
static int dai_timestamp_dmic_start(const struct device *dev, struct dai_ts_cfg *cfg)
{
	uint32_t addr = TS_DMIC_LOCAL_TSCTRL;
	uint32_t cdmas;

	/* Set DMIC timestamp registers */

	/* First point CDMAS to GPDMA channel that is used by DMIC
	 * also clear NTK to be sure there is no old timestamp.
	 */
	cdmas = FIELD_PREP(TS_LOCAL_TSCTRL_CDMAS, cfg->dma_chan_index +
		cfg->dma_chan_count * cfg->dma_id);
	sys_write32(TS_LOCAL_TSCTRL_NTK | cdmas, addr);

	/* Request on demand timestamp */
	sys_write32(TS_LOCAL_TSCTRL_ODTS | cdmas, addr);

	return 0;
}
433 
dai_timestamp_dmic_stop(const struct device * dev,struct dai_ts_cfg * cfg)434 static int dai_timestamp_dmic_stop(const struct device *dev, struct dai_ts_cfg *cfg)
435 {
436 	/* Clear NTK and write zero to CDMAS */
437 	sys_write32(TS_LOCAL_TSCTRL_NTK, TS_DMIC_LOCAL_TSCTRL);
438 	return 0;
439 }
440 
/* Read the latest DMIC timestamp. If NTK (new timestamp) is set, fill
 * @tsd with the wall clock and sample counters and clear NTK so the
 * next timestamp can latch. Returns 0 on success, -ENODATA when no new
 * timestamp was available; walclk_rate is filled in either way.
 */
static int dai_timestamp_dmic_get(const struct device *dev, struct dai_ts_cfg *cfg,
				  struct dai_ts_data *tsd)
{
	/* Read DMIC timestamp registers */
	uint32_t tsctrl = TS_DMIC_LOCAL_TSCTRL;
	uint32_t ntk;

	/* Read SSP timestamp registers */
	ntk = sys_read32(tsctrl) & TS_LOCAL_TSCTRL_NTK;
	if (!ntk) {
		goto out;
	}

	/* NTK was set, get wall clock */
	tsd->walclk = sys_read64(TS_DMIC_LOCAL_WALCLK);

	/* Sample */
	tsd->sample = sys_read64(TS_DMIC_LOCAL_SAMPLE);

	/* Clear NTK to enable successive timestamps */
	sys_write32(TS_LOCAL_TSCTRL_NTK, tsctrl);

out:
	tsd->walclk_rate = cfg->walclk_rate;
	if (!ntk) {
		return -ENODATA;
	}

	return 0;
}
471 
472 /* this ramps volume changes over time */
/* this ramps volume changes over time */
static void dai_dmic_gain_ramp(struct dai_intel_dmic *dmic)
{
	k_spinlock_key_t key;
	int32_t gval;
	uint32_t val;
	int i;

	/* Currently there's no DMIC HW internal mutings and wait times
	 * applied into this start sequence. It can be implemented here if
	 * start of audio capture would contain clicks and/or noise and it
	 * is not suppressed by gain ramp somewhere in the capture pipe.
	 */
	LOG_DBG("DMIC gain ramp");

	/*
	 * At run-time dmic->gain is only changed in this function, and this
	 * function runs in the pipeline task context, so it cannot run
	 * concurrently on multiple cores, since there's always only one
	 * task associated with each DAI, so we don't need to hold the lock to
	 * read the value here.
	 */
	/* Ramp already complete: gain parked at max, nothing to do */
	if (dmic->gain == DMIC_HW_FIR_GAIN_MAX << 11) {
		return;
	}

	key = k_spin_lock(&dmic->lock);

	/* Increment gain with logarithmic step.
	 * Gain is Q2.30 and gain modifier is Q12.20.
	 */
	dmic->startcount++;
	dmic->gain = q_multsr_sat_32x32(dmic->gain, dmic->gain_coef, Q_SHIFT_GAIN_X_GAIN_COEF);

	/* Gain is stored as Q2.30, while HW register is Q1.19 so shift
	 * the value right by 11.
	 */
	gval = dmic->gain >> 11;

	/* Note that DMIC gain value zero has a special purpose. Value zero
	 * sets gain bypass mode in HW. Zero value will be applied after ramp
	 * is complete. It is because exact 1.0 gain is not possible with Q1.19.
	 */
	if (gval > DMIC_HW_FIR_GAIN_MAX) {
		gval = 0;
		dmic->gain = DMIC_HW_FIR_GAIN_MAX << 11;
	}

	/* Write gain to registers of each enabled PDM controller */
	for (i = 0; i < CONFIG_DAI_DMIC_HW_CONTROLLERS; i++) {
		if (!dmic->enable[i]) {
			continue;
		}

		/* Unmute the CIC stage at a fixed point in the ramp */
		if (dmic->startcount == DMIC_UNMUTE_CIC) {
			dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
					     CIC_CONTROL_MIC_MUTE, 0);
		}

		/* Unmute this FIFO's FIR stage at a fixed point in the ramp */
		if (dmic->startcount == DMIC_UNMUTE_FIR) {
			dai_dmic_update_bits(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
					     dmic->dai_config_params.dai_index + FIR_CONTROL,
					     FIR_CONTROL_MUTE, 0);
		}

		if (gval != 0) {
			val = FIELD_PREP(OUT_GAIN, gval);
			dai_dmic_write(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				       dmic->dai_config_params.dai_index + OUT_GAIN_LEFT, val);
			dai_dmic_write(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				       dmic->dai_config_params.dai_index + OUT_GAIN_RIGHT, val);
		} else {
			/* Ramp done: apply the configured static gains */
			dai_dmic_write(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				       dmic->dai_config_params.dai_index + OUT_GAIN_LEFT,
				       dmic->gain_left);
			dai_dmic_write(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				       dmic->dai_config_params.dai_index + OUT_GAIN_RIGHT,
				       dmic->gain_right);
		}
	}

	k_spin_unlock(&dmic->lock, key);
}
555 
/* Start capture on this DAI: initialize the unmute ramp state, start
 * the FIFO packer, enable CIC/PDM for the microphones selected in
 * dmic->enable[], start the FIR of this DAI's FIFO and release the PDM
 * soft reset, then trigger command sync across link segments.
 */
static void dai_dmic_start(struct dai_intel_dmic *dmic)
{
	k_spinlock_key_t key;
	int i;
	int mic_a;
	int mic_b;
	int start_fir;

	/* enable port */
	key = k_spin_lock(&dmic->lock);

#ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
	/* On ACE the soft reset is released before enabling the mics */
	for (i = 0; i < CONFIG_DAI_DMIC_HW_CONTROLLERS; i++) {
		dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL, CIC_CONTROL_SOFT_RESET, 0);
	}
#endif

	dmic->startcount = 0;

	/* Compute unmute ramp gain update coefficient. */
	dmic->gain_coef = db2lin_fixed(LOGRAMP_CONST_TERM / dmic->unmute_time_ms);

	/* Initial gain value, convert Q12.20 to Q2.30 */
	dmic->gain = Q_SHIFT_LEFT(db2lin_fixed(LOGRAMP_START_DB), 20, 30);

	dai_dmic_sync_prepare(dmic);

	dai_dmic_start_fifo_packers(dmic, dmic->dai_config_params.dai_index);

	for (i = 0; i < CONFIG_DAI_DMIC_HW_CONTROLLERS; i++) {
		/* enable[i] bit 0 selects mic A, bit 1 selects mic B */
		mic_a = dmic->enable[i] & 1;
		mic_b = (dmic->enable[i] & 2) >> 1;
		start_fir = dmic->enable[i] > 0;

		/* If both microphones are needed start them simultaneously
		 * to start them in sync. The reset may be cleared for another
		 * FIFO already. If only one mic, start them independently.
		 * This makes sure we do not clear start/en for another DAI.
		 */
		if (mic_a && mic_b) {
			dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
					     CIC_CONTROL_CIC_START_A |
					     CIC_CONTROL_CIC_START_B,
					     FIELD_PREP(CIC_CONTROL_CIC_START_A, 1) |
					     FIELD_PREP(CIC_CONTROL_CIC_START_B, 1));
			dai_dmic_update_bits(dmic, dmic_base[i] + MIC_CONTROL,
					     MIC_CONTROL_PDM_EN_A |
					     MIC_CONTROL_PDM_EN_B,
					     FIELD_PREP(MIC_CONTROL_PDM_EN_A, 1) |
					     FIELD_PREP(MIC_CONTROL_PDM_EN_B, 1));
		} else if (mic_a) {
			dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
					     CIC_CONTROL_CIC_START_A,
					     FIELD_PREP(CIC_CONTROL_CIC_START_A, 1));
			dai_dmic_update_bits(dmic, dmic_base[i] + MIC_CONTROL,
					     MIC_CONTROL_PDM_EN_A,
					     FIELD_PREP(MIC_CONTROL_PDM_EN_A, 1));
		} else if (mic_b) {
			dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
					     CIC_CONTROL_CIC_START_B,
					     FIELD_PREP(CIC_CONTROL_CIC_START_B, 1));
			dai_dmic_update_bits(dmic, dmic_base[i] + MIC_CONTROL,
					     MIC_CONTROL_PDM_EN_B,
					     FIELD_PREP(MIC_CONTROL_PDM_EN_B, 1));
		}

		/* Start this DAI's FIR only if any mic on controller i is on */
		dai_dmic_update_bits(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				     dmic->dai_config_params.dai_index + FIR_CONTROL,
				     FIR_CONTROL_START,
				     FIELD_PREP(FIR_CONTROL_START, start_fir));
	}

#ifndef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
	/* Clear soft reset for all/used PDM controllers. This should
	 * start capture in sync.
	 */
	for (i = 0; i < CONFIG_DAI_DMIC_HW_CONTROLLERS; i++) {
		dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
				     CIC_CONTROL_SOFT_RESET, 0);

		LOG_INF("dmic_start(), cic 0x%08x",
			dai_dmic_read(dmic, dmic_base[i] + CIC_CONTROL));
	}
#endif

	/* Set bit dai->index */
	dai_dmic_global.active_fifos_mask |= BIT(dmic->dai_config_params.dai_index);
	dai_dmic_global.pause_mask &= ~BIT(dmic->dai_config_params.dai_index);

	dmic->state = DAI_STATE_RUNNING;
	k_spin_unlock(&dmic->lock, key);

	dmic_sync_trigger(dmic);

	LOG_INF("dmic_start(), dmic_active_fifos_mask = 0x%x",
		dai_dmic_global.active_fifos_mask);
}
653 
/* Stop capture on this DAI: halt the FIFO packer, update the global
 * active/pause masks, mute this DAI's FIR, and soft-reset + mute the
 * CIC only when no other FIFO remains active. @stop_is_pause selects
 * whether the pause mask bit is set (pause) or cleared (full stop).
 */
static void dai_dmic_stop(struct dai_intel_dmic *dmic, bool stop_is_pause)
{
	k_spinlock_key_t key;
	int i;

	LOG_DBG("dmic_stop()");
	key = k_spin_lock(&dmic->lock);

	dai_dmic_stop_fifo_packers(dmic, dmic->dai_config_params.dai_index);

	/* Set soft reset and mute on for all PDM controllers. */
	LOG_INF("dmic_stop(), dmic_active_fifos_mask = 0x%x",
			dai_dmic_global.active_fifos_mask);

	/* Clear bit dmic->dai_config_params.dai_index for active FIFO.
	 * If stop for pause, set pause mask bit.
	 * If stop is not for pausing, it is safe to clear the pause bit.
	 */
	dai_dmic_global.active_fifos_mask &= ~BIT(dmic->dai_config_params.dai_index);
	if (stop_is_pause) {
		dai_dmic_global.pause_mask |= BIT(dmic->dai_config_params.dai_index);
	} else {
		dai_dmic_global.pause_mask &= ~BIT(dmic->dai_config_params.dai_index);
	}

	for (i = 0; i < CONFIG_DAI_DMIC_HW_CONTROLLERS; i++) {
		/* Don't stop CIC yet if one FIFO remains active */
		if (dai_dmic_global.active_fifos_mask == 0) {
			dai_dmic_update_bits(dmic, dmic_base[i] + CIC_CONTROL,
					     CIC_CONTROL_SOFT_RESET |
					     CIC_CONTROL_MIC_MUTE,
					     CIC_CONTROL_SOFT_RESET |
					     CIC_CONTROL_MIC_MUTE);
		}
		/* Always mute this DAI's own FIR channel */
		dai_dmic_update_bits(dmic, dmic_base[i] + FIR_CHANNEL_REGS_SIZE *
				     dmic->dai_config_params.dai_index + FIR_CONTROL,
				     FIR_CONTROL_MUTE,
				     FIR_CONTROL_MUTE);
	}

	k_spin_unlock(&dmic->lock, key);
}
696 
/* Return DAI properties (FIFO address/depth, DMA handshake) for this
 * instance. Note: dev->config is cast away from const and filled in
 * place, so the returned pointer aliases the device's config storage;
 * @dir and @stream_id are unused.
 */
const struct dai_properties *dai_dmic_get_properties(const struct device *dev,
						     enum dai_dir dir,
						     int stream_id)
{
	const struct dai_intel_dmic *dmic = (const struct dai_intel_dmic *)dev->data;
	struct dai_properties *prop = (struct dai_properties *)dev->config;

	prop->fifo_address = dmic->fifo.offset;
	prop->fifo_depth = dmic->fifo.depth;
	prop->dma_hs_id = dmic->fifo.handshake;
	prop->reg_init_delay = 0;

	return prop;
}
711 
/* DAI trigger entry point. Only the RX (capture) direction is valid.
 * START is honored only from PAUSED/PRE_RUNNING; STOP and PAUSE both
 * stop capture but differ in pause-mask bookkeeping; COPY advances the
 * unmute gain ramp. Unknown commands are ignored. Returns 0 or -EINVAL
 * for a non-RX direction (a START from a wrong state only logs).
 */
static int dai_dmic_trigger(const struct device *dev, enum dai_dir dir,
			    enum dai_trigger_cmd cmd)
{
	struct dai_intel_dmic *dmic = (struct dai_intel_dmic *)dev->data;

	LOG_DBG("dmic_trigger()");

	if (dir != DAI_DIR_RX) {
		LOG_ERR("dmic_trigger(): direction != DAI_DIR_RX");
		return -EINVAL;
	}

	switch (cmd) {
	case DAI_TRIGGER_START:
		if (dmic->state == DAI_STATE_PAUSED ||
		    dmic->state == DAI_STATE_PRE_RUNNING) {
			dai_dmic_start(dmic);
			dmic->state = DAI_STATE_RUNNING;
		} else {
			LOG_ERR("dmic_trigger(): state is not prepare or paused, dmic->state = %u",
				dmic->state);
		}
		break;
	case DAI_TRIGGER_STOP:
		dai_dmic_stop(dmic, false);
		dmic->state = DAI_STATE_PRE_RUNNING;
		break;
	case DAI_TRIGGER_PAUSE:
		dai_dmic_stop(dmic, true);
		dmic->state = DAI_STATE_PAUSED;
		break;
	case DAI_TRIGGER_COPY:
		/* Periodic tick from the copier: step the gain ramp */
		dai_dmic_gain_ramp(dmic);
		break;
	default:
		break;
	}

	return 0;
}
752 
dai_dmic_get_config(const struct device * dev,struct dai_config * cfg,enum dai_dir dir)753 static int dai_dmic_get_config(const struct device *dev, struct dai_config *cfg, enum dai_dir dir)
754 {
755 	struct dai_intel_dmic *dmic = (struct dai_intel_dmic *)dev->data;
756 
757 	if (dir != DAI_DIR_RX) {
758 		return -EINVAL;
759 	}
760 
761 	if (!cfg) {
762 		return -EINVAL;
763 	}
764 
765 	*cfg = dmic->dai_config_params;
766 
767 	return 0;
768 }
769 
/* Apply a new DAI configuration. The hardware programming comes from
 * the bespoke blob (@bespoke_cfg, NHLT format in this build); @cfg is
 * used only for the channel map on platforms that need it. On success
 * the DAI moves to PRE_RUNNING. Returns 0 or a negative error.
 */
static int dai_dmic_set_config(const struct device *dev,
		const struct dai_config *cfg, const void *bespoke_cfg)

{
	struct dai_intel_dmic *dmic = (struct dai_intel_dmic *)dev->data;
	int ret = 0;
	int di = dmic->dai_config_params.dai_index;
	k_spinlock_key_t key;

	LOG_INF("dmic_set_config()");

	if (di >= CONFIG_DAI_DMIC_HW_FIFOS) {
		LOG_ERR("dmic_set_config(): DAI index exceeds number of FIFOs");
		return -EINVAL;
	}

	if (!bespoke_cfg) {
		LOG_ERR("dmic_set_config(): NULL config");
		return -EINVAL;
	}

	dai_dmic_program_channel_map(dmic, cfg, di);

	key = k_spin_lock(&dmic->lock);

#if CONFIG_DAI_INTEL_DMIC_TPLG_PARAMS
#error DMIC TPLG is not yet implemented

#elif CONFIG_DAI_INTEL_DMIC_NHLT
	ret = dai_dmic_set_config_nhlt(dmic, bespoke_cfg);

	/* There's no unmute ramp duration in blob, so the default rate dependent is used. */
	dmic->unmute_time_ms = dmic_get_unmute_ramp_from_samplerate(dmic->dai_config_params.rate);
#else
#error No DMIC config selected
#endif

	if (ret < 0) {
		LOG_ERR("dmic_set_config(): Failed to set the requested configuration.");
		goto out;
	}

	dmic->state = DAI_STATE_PRE_RUNNING;

out:
	k_spin_unlock(&dmic->lock, key);
	return ret;
}
818 
dai_dmic_probe_wrapper(const struct device * dev)819 static int dai_dmic_probe_wrapper(const struct device *dev)
820 {
821 	struct dai_intel_dmic *dmic = (struct dai_intel_dmic *)dev->data;
822 	k_spinlock_key_t key;
823 	int ret = 0;
824 
825 	key = k_spin_lock(&dmic->lock);
826 
827 	if (dmic->sref == 0) {
828 		ret = dai_dmic_probe(dmic);
829 	}
830 
831 	if (!ret) {
832 		dmic->sref++;
833 	}
834 
835 	k_spin_unlock(&dmic->lock, key);
836 
837 	return ret;
838 }
839 
/* Reference-counted remove: the last user tears the hardware down via
 * dai_dmic_remove(). NOTE(review): assumes every remove is balanced by
 * a prior successful probe; an unbalanced call would underflow sref --
 * confirm the PM framework guarantees the pairing.
 */
static int dai_dmic_remove_wrapper(const struct device *dev)
{
	struct dai_intel_dmic *dmic = (struct dai_intel_dmic *)dev->data;
	k_spinlock_key_t key;
	int ret = 0;

	key = k_spin_lock(&dmic->lock);

	if (--dmic->sref == 0) {
		ret = dai_dmic_remove(dmic);
	}

	k_spin_unlock(&dmic->lock, key);

	return ret;
}
856 
dmic_pm_action(const struct device * dev,enum pm_device_action action)857 static int dmic_pm_action(const struct device *dev, enum pm_device_action action)
858 {
859 	switch (action) {
860 	case PM_DEVICE_ACTION_SUSPEND:
861 		dai_dmic_remove_wrapper(dev);
862 		break;
863 	case PM_DEVICE_ACTION_RESUME:
864 		dai_dmic_probe_wrapper(dev);
865 		break;
866 	case PM_DEVICE_ACTION_TURN_OFF:
867 	case PM_DEVICE_ACTION_TURN_ON:
868 		/* All device pm is handled during resume and suspend */
869 		break;
870 	default:
871 		return -ENOTSUP;
872 	}
873 
874 	return 0;
875 }
876 
/* DAI driver API table. probe/remove map to PM runtime get/put so the
 * hardware is powered only while at least one user holds the device.
 */
DEVICE_API(dai, dai_dmic_ops) = {
	.probe			= pm_device_runtime_get,
	.remove			= pm_device_runtime_put,
	.config_set		= dai_dmic_set_config,
	.config_get		= dai_dmic_get_config,
	.get_properties		= dai_dmic_get_properties,
	.trigger		= dai_dmic_trigger,
	.ts_config		= dai_dmic_timestamp_config,
	.ts_start		= dai_timestamp_dmic_start,
	.ts_stop		= dai_timestamp_dmic_stop,
	.ts_get			= dai_timestamp_dmic_get
};
889 
dai_dmic_initialize_device(const struct device * dev)890 static int dai_dmic_initialize_device(const struct device *dev)
891 {
892 	IRQ_CONNECT(
893 		DT_INST_IRQN(0),
894 		IRQ_DEFAULT_PRIORITY,
895 		dai_dmic_irq_handler,
896 		DEVICE_DT_INST_GET(0),
897 		0);
898 
899 	return pm_device_driver_init(dev, dmic_pm_action);
900 };
901 
902 
/* Per-instance expansion: properties storage, driver data initialized
 * from devicetree (register base, shim base, optional HDA-ML and VSS
 * bases, IRQ, FIFO offset and DMA handshake), the PM device, and the
 * device instance bound to dai_dmic_ops.
 */
#define DAI_INTEL_DMIC_DEVICE_INIT(n)					\
	static struct dai_properties dai_intel_dmic_properties_##n;	\
									\
	static struct dai_intel_dmic dai_intel_dmic_data_##n =		\
	{	.dai_config_params =					\
		{							\
			.type = DAI_INTEL_DMIC,				\
			.dai_index = n					\
		},							\
		.reg_base = DT_INST_REG_ADDR_BY_IDX(n, 0),		\
		.shim_base = DT_INST_PROP(n, shim),			\
		IF_ENABLED(DT_NODE_EXISTS(DT_NODELABEL(hdamlddmic)),	\
			(.hdamldmic_base = DT_REG_ADDR(DT_NODELABEL(hdamlddmic)),))	\
		IF_ENABLED(DT_NODE_EXISTS(DT_NODELABEL(dmicvss)),	\
			(.vshim_base = DT_REG_ADDR(DT_NODELABEL(dmicvss)),))	\
		.irq = DT_INST_IRQN(n),					\
		.fifo =							\
		{							\
			.offset = DT_INST_REG_ADDR_BY_IDX(n, 0)		\
				+ DT_INST_PROP(n, fifo),		\
			.handshake = DMA_HANDSHAKE_DMIC_CH##n		\
		},							\
	};								\
									\
	PM_DEVICE_DT_INST_DEFINE(n, dmic_pm_action);			\
									\
	DEVICE_DT_INST_DEFINE(n,					\
		dai_dmic_initialize_device,				\
		PM_DEVICE_DT_INST_GET(n),				\
		&dai_intel_dmic_data_##n,				\
		&dai_intel_dmic_properties_##n,				\
		POST_KERNEL,						\
		CONFIG_DAI_INIT_PRIORITY,				\
		&dai_dmic_ops);

DT_INST_FOREACH_STATUS_OKAY(DAI_INTEL_DMIC_DEVICE_INIT)
939