1 /*
2 * Copyright (c) 2022 Intel Corporation.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <errno.h>
8 #include <zephyr/sys/util_macro.h>
9 #include <stdbool.h>
10 #include <stdint.h>
11 #include <zephyr/spinlock.h>
12 #include <zephyr/devicetree.h>
13 #include <zephyr/pm/device.h>
14 #include <zephyr/pm/device_runtime.h>
15 #define LOG_DOMAIN dai_intel_ssp
16 #include <zephyr/logging/log.h>
17 LOG_MODULE_REGISTER(LOG_DOMAIN);
18
19 #include "ssp.h"
20
21
22 #define dai_set_drvdata(dai, data) (dai->priv_data = data)
23 #define dai_get_drvdata(dai) dai->priv_data
24 #define dai_get_plat_data(dai) dai->ssp_plat_data
25 #define dai_get_mn(dai) dai->ssp_plat_data->mn_inst
26 #define dai_get_ftable(dai) dai->ssp_plat_data->ftable
27 #define dai_get_fsources(dai) dai->ssp_plat_data->fsources
28 #define dai_mn_base(dai) dai->ssp_plat_data->mn_inst->base
29 #define dai_base(dai) dai->ssp_plat_data->base
30 #define dai_ip_base(dai) dai->ssp_plat_data->ip_base
31 #define dai_shim_base(dai) dai->ssp_plat_data->shim_base
32 #define dai_hdamlssp_base(dai) dai->ssp_plat_data->hdamlssp_base
33 #define dai_i2svss_base(dai) dai->ssp_plat_data->i2svss_base
34
35 #define DAI_DIR_PLAYBACK 0
36 #define DAI_DIR_CAPTURE 1
37 #define SSP_ARRAY_INDEX(dir) ((dir) == DAI_DIR_RX ? DAI_DIR_CAPTURE : DAI_DIR_PLAYBACK)
38
39 static const char irq_name_level5_z[] = "level5";
40
41 static struct dai_intel_ssp_freq_table ssp_freq_table[] = {
42 { DT_PROP(DT_NODELABEL(audioclk), clock_frequency),
43 DT_PROP(DT_NODELABEL(audioclk), clock_frequency) / 1000},
44 { DT_PROP(DT_NODELABEL(sysclk), clock_frequency),
45 DT_PROP(DT_NODELABEL(sysclk), clock_frequency) / 1000},
46 { DT_PROP(DT_NODELABEL(pllclk), clock_frequency),
47 DT_PROP(DT_NODELABEL(pllclk), clock_frequency) / 1000},
48 };
49
50 static uint32_t ssp_freq_sources[] = {
51 DAI_INTEL_SSP_CLOCK_AUDIO_CARDINAL,
52 DAI_INTEL_SSP_CLOCK_XTAL_OSCILLATOR,
53 DAI_INTEL_SSP_CLOCK_PLL_FIXED,
54 };
55
56 static struct dai_intel_ssp_mn ssp_mn_divider = {
57 .base = DT_REG_ADDR_BY_IDX(DT_NODELABEL(ssp0), 1),
58 };
59
60
61 #define INTEL_SSP_INST_DEFINE(node_id) { \
62 .is_initialized = false, \
63 .is_power_en = false, \
64 .acquire_count = 0, \
65 .ssp_index = DT_PROP(node_id, ssp_index), \
66 .base = DT_REG_ADDR_BY_IDX(node_id, 0), \
67 IF_ENABLED(DT_NODE_EXISTS(DT_NODELABEL(sspbase)), \
68 (.ip_base = DT_REG_ADDR_BY_IDX(DT_NODELABEL(sspbase), 0),)) \
69 .shim_base = DT_REG_ADDR_BY_IDX(DT_NODELABEL(shim), 0), \
70 IF_ENABLED(DT_NODE_EXISTS(DT_NODELABEL(hdamlssp)), \
71 (.hdamlssp_base = DT_REG_ADDR(DT_NODELABEL(hdamlssp)),))\
72 IF_ENABLED(DT_PROP_HAS_IDX(node_id, i2svss, 0), \
73 (.i2svss_base = DT_PROP_BY_IDX(node_id, i2svss, 0),)) \
74 .irq = DT_NUM_IRQS(node_id), \
75 .irq_name = irq_name_level5_z, \
76 .fifo[DAI_DIR_PLAYBACK].offset = \
77 DT_REG_ADDR_BY_IDX(node_id, 0) + OUT_FIFO, \
78 .fifo[DAI_DIR_PLAYBACK].handshake = \
79 DT_DMAS_CELL_BY_NAME(node_id, tx, channel), \
80 .fifo[DAI_DIR_CAPTURE].offset = \
81 DT_REG_ADDR_BY_IDX(node_id, 0) + IN_FIFO, \
82 .fifo[DAI_DIR_CAPTURE].handshake = \
83 DT_DMAS_CELL_BY_NAME(node_id, rx, channel), \
84 .mn_inst = &ssp_mn_divider, \
85 .ftable = ssp_freq_table, \
86 .fsources = ssp_freq_sources, \
87 .clk_active = 0, \
88 },
89
90 static struct dai_intel_ssp_plat_data ssp_plat_data_table[] = {
91 DT_FOREACH_STATUS_OKAY(intel_ssp, INTEL_SSP_INST_DEFINE)
92 };
93
94
95 static uint32_t ssp_get_instance_count(void)
96 {
97 return ARRAY_SIZE(ssp_plat_data_table);
98 }
99
100
101 static struct dai_intel_ssp_plat_data *ssp_get_device_instance(uint32_t ssp_index)
102 {
103 uint32_t ssp_instance = ssp_get_instance_count();
104 uint32_t i;
105
106 for (i = 0; i < ssp_instance; i++) {
107 if (ssp_plat_data_table[i].ssp_index == ssp_index) {
108 return &ssp_plat_data_table[i];
109 }
110 }
111
112 return NULL;
113 }
114
115
116 static void dai_ssp_update_bits(struct dai_intel_ssp *dp, uint32_t reg, uint32_t mask, uint32_t val)
117 {
118 uint32_t dest = dai_base(dp) + reg;
119
120 LOG_DBG("base %x, reg %x, mask %x, value %x", dai_base(dp), reg, mask, val);
121
122 sys_write32((sys_read32(dest) & (~mask)) | (val & mask), dest);
123 }
124
125 #if CONFIG_INTEL_MN
126 static int dai_ssp_gcd(int a, int b)
127 {
128 int aux;
129 int k;
130
131 if (a == 0) {
132 return b;
133 }
134
135 if (b == 0) {
136 return a;
137 }
138
139 /* If the numbers are negative, convert them to positive numbers
140 * gcd(a, b) = gcd(-a, -b) = gcd(-a, b) = gcd(a, -b)
141 */
142 if (a < 0) {
143 a = -a;
144 }
145
146 if (b < 0) {
147 b = -b;
148 }
149
150 /* Find the greatest power of 2 that divides both a and b */
151 for (k = 0; ((a | b) & 1) == 0; k++) {
152 a >>= 1;
153 b >>= 1;
154 }
155
156 /* divide by 2 until a becomes odd */
157 while ((a & 1) == 0) {
158 a >>= 1;
159 }
160
161 do {
162 /* if b is even, remove all factors of 2 */
163 while ((b & 1) == 0) {
164 b >>= 1;
165 }
166
167 /* both a and b are odd now. Swap so a <= b
168 * then set b = b - a, which is also even
169 */
170 if (a > b) {
171 aux = a;
172 a = b;
173 b = aux;
174 }
175
176 b = b - a;
177
178 } while (b != 0);
179
180 /* restore common factors of 2 */
181 return a << k;
182 }
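/*
 * Worked example (illustrative numbers, not taken from any particular
 * platform configuration): dai_ssp_gcd(3072000, 3200000) strips the ten
 * shared factors of two (2^10 = 1024), reduces the remaining odd parts to
 * 125 and returns 125 << 10 = 128000. dai_ssp_find_mn() below relies on
 * this to shrink the M/N fraction to its lowest terms.
 */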
183 #endif
184
185 /**
186 * \brief Checks if given clock is used as source for any MCLK.
187 *
188 * \return true if any port uses the given clock source, false otherwise.
189 */
190 static bool dai_ssp_is_mclk_source_in_use(struct dai_intel_ssp *dp)
191 {
192 struct dai_intel_ssp_mn *mp = dai_get_mn(dp);
193 bool ret = false;
194 int i;
195
196 for (i = 0; i < ARRAY_SIZE(mp->mclk_sources_ref); i++) {
197 if (mp->mclk_sources_ref[i] > 0) {
198 ret = true;
199 break;
200 }
201 }
202
203 return ret;
204 }
205
206 /**
207 * \brief Configures source clock for MCLK.
208 * All MCLKs share the same source, so it should be changed
209 * only if there are no other ports using it already.
210 * \param[in] mclk_rate main clock frequency.
211 * \return 0 on success, error code otherwise.
212 */
213 static int dai_ssp_setup_initial_mclk_source(struct dai_intel_ssp *dp, uint32_t mclk_id,
214 uint32_t mclk_rate)
215 {
216 struct dai_intel_ssp_freq_table *ft = dai_get_ftable(dp);
217 uint32_t *fs = dai_get_fsources(dp);
218 struct dai_intel_ssp_mn *mp = dai_get_mn(dp);
219 int clk_index = -1;
220 uint32_t mdivc;
221 int ret = 0;
222 int i;
223
224 if (mclk_id >= DAI_INTEL_SSP_NUM_MCLK) {
225 LOG_ERR("can't configure MCLK %d, only %d mclk[s] existed!",
226 mclk_id, DAI_INTEL_SSP_NUM_MCLK);
227 ret = -EINVAL;
228 goto out;
229 }
230
231 /* searching the smallest possible mclk source */
232 for (i = 0; i <= DAI_INTEL_SSP_MAX_FREQ_INDEX; i++) {
233 if (ft[i].freq % mclk_rate == 0) {
234 clk_index = i;
235 break;
236 }
237 }
238
239 if (clk_index < 0) {
240 LOG_ERR("MCLK %d, no valid source", mclk_rate);
241 ret = -EINVAL;
242 goto out;
243 }
244
245 mp->mclk_source_clock = clk_index;
246
247 mdivc = sys_read32(dai_mn_base(dp) + MN_MDIVCTRL);
248
249 /* enable MCLK divider */
250 mdivc |= MN_MDIVCTRL_M_DIV_ENABLE(mclk_id);
251
252 /* clear source mclk clock - bits 17-16 */
253 mdivc &= ~MCDSS(MN_SOURCE_CLKS_MASK);
254
255 /* select source clock */
256 mdivc |= MCDSS(fs[clk_index]);
257
258 sys_write32(mdivc, dai_mn_base(dp) + MN_MDIVCTRL);
259
260 mp->mclk_sources_ref[mclk_id]++;
261 out:
262
263 return ret;
264 }
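/*
 * Example, assuming the devicetree clock nodes yield a frequency table of
 * { 24.576 MHz, 38.4 MHz, 96 MHz } (an assumption for illustration only):
 * a request for a 12.288 MHz MCLK matches the first entry
 * (24576000 % 12288000 == 0), so clk_index 0 is selected, MCDSS is pointed
 * at that source, the divider for mclk_id is enabled and its reference
 * count becomes 1.
 */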
265
266 /**
267 * \brief Checks if requested MCLK can be achieved with current source.
268 * \param[in] mclk_rate main clock frequency.
269 * \return 0 on success, error code otherwise.
270 */
271 static int dai_ssp_check_current_mclk_source(struct dai_intel_ssp *dp, uint16_t mclk_id,
272 uint32_t mclk_rate)
273 {
274 struct dai_intel_ssp_freq_table *ft = dai_get_ftable(dp);
275 struct dai_intel_ssp_mn *mp = dai_get_mn(dp);
276 uint32_t mdivc;
277 int ret = 0;
278
279 LOG_INF("MCLK %d, source = %d", mclk_rate, mp->mclk_source_clock);
280
281 if (ft[mp->mclk_source_clock].freq % mclk_rate != 0) {
282 LOG_ERR("MCLK %d, no valid configuration for already selected source = %d",
283 mclk_rate, mp->mclk_source_clock);
284 ret = -EINVAL;
285 }
286
287 /* if the mclk is already used, can't change its divider, just increase ref count */
288 if (mp->mclk_sources_ref[mclk_id] > 0) {
289 if (mp->mclk_rate[mclk_id] != mclk_rate) {
290 LOG_ERR("Can't set MCLK %d to %d, it is already configured to %d",
291 mclk_id, mclk_rate, mp->mclk_rate[mclk_id]);
292 return -EINVAL;
293 }
294
295 mp->mclk_sources_ref[mclk_id]++;
296 } else {
297 mdivc = sys_read32(dai_mn_base(dp) + MN_MDIVCTRL);
298
299 /* enable MCLK divider */
300 mdivc |= MN_MDIVCTRL_M_DIV_ENABLE(mclk_id);
301 sys_write32(mdivc, dai_mn_base(dp) + MN_MDIVCTRL);
302
303 mp->mclk_sources_ref[mclk_id]++;
304 }
305
306 return ret;
307 }
308
309 /**
310 * \brief Sets MCLK divider to given value.
311 * \param[in] mclk_id ID of MCLK.
312 * \param[in] mdivr_val divider value.
313 * \return 0 on success, error code otherwise.
314 */
315 static int dai_ssp_set_mclk_divider(struct dai_intel_ssp *dp, uint16_t mclk_id, uint32_t mdivr_val)
316 {
317 uint32_t mdivr;
318
319 LOG_INF("mclk_id %d mdivr_val %d", mclk_id, mdivr_val);
320 switch (mdivr_val) {
321 case 1:
322 mdivr = 0x00000fff; /* bypass divider for MCLK */
323 break;
324 case 2 ... 8:
325 mdivr = mdivr_val - 2; /* 1/n */
326 break;
327 default:
328 LOG_ERR("invalid mdivr_val %d", mdivr_val);
329 return -EINVAL;
330 }
331
332 sys_write32(mdivr, dai_mn_base(dp) + MN_MDIVR(mclk_id));
333
334 return 0;
335 }
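/*
 * Example, assuming a 38.4 MHz source clock (illustrative only): a 19.2 MHz
 * MCLK gives mdivr_val = 2, which falls into the 2..8 branch and is written
 * as 0 (divide by two); mdivr_val = 1 selects the 0x00000fff bypass
 * encoding instead.
 */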
336
337 static int dai_ssp_mn_set_mclk(struct dai_intel_ssp *dp, uint16_t mclk_id, uint32_t mclk_rate)
338 {
339 struct dai_intel_ssp_freq_table *ft = dai_get_ftable(dp);
340 struct dai_intel_ssp_mn *mp = dai_get_mn(dp);
341 k_spinlock_key_t key;
342 int ret = 0;
343
344 if (mclk_id >= DAI_INTEL_SSP_NUM_MCLK) {
345 LOG_ERR("mclk ID (%d) >= %d", mclk_id, DAI_INTEL_SSP_NUM_MCLK);
346 return -EINVAL;
347 }
348
349 key = k_spin_lock(&mp->lock);
350
351 if (dai_ssp_is_mclk_source_in_use(dp)) {
352 ret = dai_ssp_check_current_mclk_source(dp, mclk_id, mclk_rate);
353 } else {
354 ret = dai_ssp_setup_initial_mclk_source(dp, mclk_id, mclk_rate);
355 }
356
357 if (ret < 0) {
358 goto out;
359 }
360
361 LOG_INF("mclk_rate %d, mclk_source_clock %d", mclk_rate, mp->mclk_source_clock);
362
363 ret = dai_ssp_set_mclk_divider(dp, mclk_id, ft[mp->mclk_source_clock].freq / mclk_rate);
364 if (!ret) {
365 mp->mclk_rate[mclk_id] = mclk_rate;
366 }
367
368 out:
369 k_spin_unlock(&mp->lock, key);
370
371 return ret;
372 }
373
374 static int dai_ssp_mn_set_mclk_blob(struct dai_intel_ssp *dp, uint32_t mdivc, uint32_t mdivr)
375 {
376 sys_write32(mdivc, dai_mn_base(dp) + MN_MDIVCTRL);
377 sys_write32(mdivr, dai_mn_base(dp) + MN_MDIVR(0));
378
379 return 0;
380 }
381
382 static void dai_ssp_mn_release_mclk(struct dai_intel_ssp *dp, uint32_t mclk_id)
383 {
384 struct dai_intel_ssp_mn *mp = dai_get_mn(dp);
385 k_spinlock_key_t key;
386 uint32_t mdivc;
387
388 key = k_spin_lock(&mp->lock);
389
390 mp->mclk_sources_ref[mclk_id]--;
391
392 /* disable the MCLK divider if nobody uses it */
393 if (!mp->mclk_sources_ref[mclk_id]) {
394 mdivc = sys_read32(dai_mn_base(dp) + MN_MDIVCTRL);
395
396 mdivc &= ~MN_MDIVCTRL_M_DIV_ENABLE(mclk_id);
397 sys_write32(mdivc, dai_mn_base(dp) + MN_MDIVCTRL);
398 }
399
400 /* release the clock source if all mclks are released */
401 if (!dai_ssp_is_mclk_source_in_use(dp)) {
402 mdivc = sys_read32(dai_mn_base(dp) + MN_MDIVCTRL);
403
404 /* clear source mclk clock - bits 17-16 */
405 mdivc &= ~MCDSS(MN_SOURCE_CLKS_MASK);
406
407 sys_write32(mdivc, dai_mn_base(dp) + MN_MDIVCTRL);
408
409 mp->mclk_source_clock = 0;
410 }
411 k_spin_unlock(&mp->lock, key);
412 }
413
414 #if CONFIG_INTEL_MN
415 /**
416 * \brief Finds valid M/(N * SCR) values for given frequencies.
417 * \param[in] freq SSP clock frequency.
418 * \param[in] bclk Bit clock frequency.
419 * \param[out] out_scr_div SCR divisor.
420 * \param[out] out_m M value of M/N divider.
421 * \param[out] out_n N value of M/N divider.
422 * \return true if found suitable values, false otherwise.
423 */
424 static bool dai_ssp_find_mn(uint32_t freq, uint32_t bclk, uint32_t *out_scr_div, uint32_t *out_m,
425 uint32_t *out_n)
426 {
427 uint32_t m, n, mn_div;
428 uint32_t scr_div = freq / bclk;
429
430 LOG_INF("for freq %d bclk %d", freq, bclk);
431 /* check if just SCR is enough */
432 if (freq % bclk == 0 && scr_div < (SSCR0_SCR_MASK >> 8) + 1) {
433 *out_scr_div = scr_div;
434 *out_m = 1;
435 *out_n = 1;
436
437 return true;
438 }
439
440 /* M/(N * scr_div) has to be less than 1/2 */
441 if ((bclk * 2) >= freq) {
442 return false;
443 }
444
445 /* odd SCR gives lower duty cycle */
446 if (scr_div > 1 && scr_div % 2 != 0) {
447 --scr_div;
448 }
449
450 /* clamp to valid SCR range */
451 scr_div = MIN(scr_div, (SSCR0_SCR_MASK >> 8) + 1);
452
453 /* find highest even divisor */
454 while (scr_div > 1 && freq % scr_div != 0) {
455 scr_div -= 2;
456 }
457
458 /* compute M/N with smallest dividend and divisor */
459 mn_div = dai_ssp_gcd(bclk, freq / scr_div);
460
461 m = bclk / mn_div;
462 n = freq / scr_div / mn_div;
463
464 /* M/N values can be up to 24 bits */
465 if (n & (~0xffffff)) {
466 return false;
467 }
468
469 *out_scr_div = scr_div;
470 *out_m = m;
471 *out_n = n;
472
473 LOG_INF("m %d n %d", m, n);
474 return true;
475 }
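/*
 * Worked example, assuming a 38.4 MHz source (illustrative): for a
 * 3.072 MHz BCLK, 38400000 % 3072000 != 0, so plain SCR is not enough.
 * scr_div truncates to 12, gcd(3072000, 38400000 / 12) = 128000, which
 * gives m = 24 and n = 25, i.e. 38.4 MHz * 24 / 25 / 12 = 3.072 MHz exactly.
 */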
476
477 /**
478 * \brief Finds index of clock valid for given BCLK rate.
479 * Clock that can use just SCR is preferred.
480 * M/N other than 1/1 is used only if there are no other possibilities.
481 * \param[in] bclk Bit clock frequency.
482 * \param[out] scr_div SCR divisor.
483 * \param[out] m M value of M/N divider.
484 * \param[out] n N value of M/N divider.
485 * \return index of a suitable clock if one is found, -EINVAL otherwise.
486 */
487 static int dai_ssp_find_bclk_source(struct dai_intel_ssp *dp, uint32_t bclk, uint32_t *scr_div,
488 uint32_t *m, uint32_t *n)
489 {
490 struct dai_intel_ssp_freq_table *ft = dai_get_ftable(dp);
491 struct dai_intel_ssp_mn *mp = dai_get_mn(dp);
492 int i;
493
494 /* check if we can use MCLK source clock */
495 if (dai_ssp_is_mclk_source_in_use(dp)) {
496 if (dai_ssp_find_mn(ft[mp->mclk_source_clock].freq, bclk, scr_div, m, n)) {
497 return mp->mclk_source_clock;
498 }
499
500 LOG_WRN("BCLK %d warning: cannot use MCLK source %d", bclk,
501 ft[mp->mclk_source_clock].freq);
502 }
503
504 /* searching the smallest possible bclk source */
505 for (i = 0; i <= DAI_INTEL_SSP_MAX_FREQ_INDEX; i++)
506 if (ft[i].freq % bclk == 0) {
507 *scr_div = ft[i].freq / bclk;
508 return i;
509 }
510
511 /* check if we can get target BCLK with M/N */
512 for (i = 0; i <= DAI_INTEL_SSP_MAX_FREQ_INDEX; i++) {
513 if (dai_ssp_find_mn(ft[i].freq, bclk, scr_div, m, n)) {
514 return i;
515 }
516 }
517
518 return -EINVAL;
519 }
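/*
 * Illustrative preference order (the table frequencies are assumptions based
 * on typical devicetree values, not guaranteed): with a 24.576 MHz entry, a
 * 3.072 MHz BCLK is satisfied by SCR = 8 alone, so M/N stays 1/1; a
 * 44.1 kHz-family BCLK such as 2.8224 MHz divides none of the entries, and
 * only the final dai_ssp_find_mn() loop can provide it.
 */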
520
521 /**
522 * \brief Finds index of SSP clock with the given clock source encoded index.
523 * \return the index in ssp_freq if found, -EINVAL otherwise.
524 */
525 static int dai_ssp_find_clk_ssp_index(struct dai_intel_ssp *dp, uint32_t src_enc)
526 {
527 uint32_t *fs = dai_get_fsources(dp);
528 int i;
529
530 /* search for the clock source matching the encoded value */
531 for (i = 0; i <= DAI_INTEL_SSP_MAX_FREQ_INDEX; i++) {
532 if (fs[i] == src_enc) {
533 return i;
534 }
535 }
536
537 return -EINVAL;
538 }
539
540 /**
541 * \brief Checks if given clock is used as source for any BCLK.
542 * \param[in] clk_src Bit clock source.
543 * \return true if any port uses the given clock source, false otherwise.
544 */
545 static bool dai_ssp_is_bclk_source_in_use(struct dai_intel_ssp *dp, enum bclk_source clk_src)
546 {
547 struct dai_intel_ssp_mn *mp = dai_get_mn(dp);
548 bool ret = false;
549 int i;
550
551 for (i = 0; i < ARRAY_SIZE(mp->bclk_sources); i++) {
552 if (mp->bclk_sources[i] == clk_src) {
553 ret = true;
554 break;
555 }
556 }
557
558 return ret;
559 }
560
561 /**
562 * \brief Configures M/N source clock for BCLK.
563 * All ports that use M/N share the same source, so it should be changed
564 * only if there are no other ports using M/N already.
565 * \param[in] bclk Bit clock frequency.
566 * \param[out] scr_div SCR divisor.
567 * \param[out] m M value of M/N divider.
568 * \param[out] n N value of M/N divider.
569 * \return 0 on success, error code otherwise.
570 */
571 static int dai_ssp_setup_initial_bclk_mn_source(struct dai_intel_ssp *dp, uint32_t bclk,
572 uint32_t *scr_div, uint32_t *m, uint32_t *n)
573 {
574 uint32_t *fs = dai_get_fsources(dp);
575 struct dai_intel_ssp_mn *mp = dai_get_mn(dp);
576 uint32_t mdivc;
577 int clk_index = dai_ssp_find_bclk_source(dp, bclk, scr_div, m, n);
578
579 if (clk_index < 0) {
580 LOG_ERR("BCLK %d, no valid source", bclk);
581 return -EINVAL;
582 }
583
584 mp->bclk_source_mn_clock = clk_index;
585
586 mdivc = sys_read32(dai_mn_base(dp) + MN_MDIVCTRL);
587
588 /* clear source bclk clock - 21-20 bits */
589 mdivc &= ~MNDSS(MN_SOURCE_CLKS_MASK);
590
591 /* select source clock */
592 mdivc |= MNDSS(fs[clk_index]);
593
594 sys_write32(mdivc, dai_mn_base(dp) + MN_MDIVCTRL);
595
596 return 0;
597 }
598
599 /**
600 * \brief Reset M/N source clock for BCLK.
601 * If no port is using bclk, reset to use SSP_CLOCK_XTAL_OSCILLATOR
602 * as the default clock source.
603 */
604 static void dai_ssp_reset_bclk_mn_source(struct dai_intel_ssp *dp)
605 {
606 struct dai_intel_ssp_mn *mp = dai_get_mn(dp);
607 uint32_t mdivc;
608 int clk_index = dai_ssp_find_clk_ssp_index(dp, DAI_INTEL_SSP_CLOCK_XTAL_OSCILLATOR);
609
610 if (clk_index < 0) {
611 LOG_ERR("BCLK reset failed, no SSP_CLOCK_XTAL_OSCILLATOR source!");
612 return;
613 }
614
615 mdivc = sys_read32(dai_mn_base(dp) + MN_MDIVCTRL);
616
617 /* reset to use XTAL Oscillator */
618 mdivc &= ~MNDSS(MN_SOURCE_CLKS_MASK);
619 mdivc |= MNDSS(DAI_INTEL_SSP_CLOCK_XTAL_OSCILLATOR);
620
621 sys_write32(mdivc, dai_mn_base(dp) + MN_MDIVCTRL);
622
623 mp->bclk_source_mn_clock = clk_index;
624 }
625
626 /**
627 * \brief Finds valid M/(N * SCR) values for source clock that is already locked
628 * because other ports use it.
629 * \param[in] bclk Bit clock frequency.
630 * \param[out] scr_div SCR divisor.
631 * \param[out] m M value of M/N divider.
632 * \param[out] n N value of M/N divider.
633 * \return 0 on success, error code otherwise.
634 */
635 static int dai_ssp_setup_current_bclk_mn_source(struct dai_intel_ssp *dp, uint32_t bclk,
636 uint32_t *scr_div, uint32_t *m, uint32_t *n)
637 {
638 struct dai_intel_ssp_freq_table *ft = dai_get_ftable(dp);
639 struct dai_intel_ssp_mn *mp = dai_get_mn(dp);
640 int ret = 0;
641
642 /* source for M/N is already set, no need to do it */
643 if (dai_ssp_find_mn(ft[mp->bclk_source_mn_clock].freq, bclk, scr_div, m, n)) {
644 goto out;
645 }
646
647 LOG_ERR("BCLK %d, no valid configuration for already selected source = %d",
648 bclk, mp->bclk_source_mn_clock);
649 ret = -EINVAL;
650
651 out:
652
653 return ret;
654 }
655
656 static bool dai_ssp_check_bclk_xtal_source(uint32_t bclk, bool mn_in_use,
657 uint32_t *scr_div)
658 {
659 /* since cAVS 2.0 bypassing XTAL (ECS=0) is not supported */
660 return false;
661 }
662
663 static int dai_ssp_mn_set_bclk(struct dai_intel_ssp *dp, uint32_t dai_index, uint32_t bclk_rate,
664 uint32_t *out_scr_div, bool *out_need_ecs)
665 {
666 struct dai_intel_ssp_mn *mp = dai_get_mn(dp);
667 k_spinlock_key_t key;
668 uint32_t m = 1;
669 uint32_t n = 1;
670 int ret = 0;
671 bool mn_in_use;
672
673 key = k_spin_lock(&mp->lock);
674
675 mp->bclk_sources[dai_index] = MN_BCLK_SOURCE_NONE;
676
677 mn_in_use = dai_ssp_is_bclk_source_in_use(dp, MN_BCLK_SOURCE_MN);
678
679 if (dai_ssp_check_bclk_xtal_source(bclk_rate, mn_in_use, out_scr_div)) {
680 mp->bclk_sources[dai_index] = MN_BCLK_SOURCE_XTAL;
681 *out_need_ecs = false;
682 goto out;
683 }
684
685 *out_need_ecs = true;
686
687 if (mn_in_use) {
688 ret = dai_ssp_setup_current_bclk_mn_source(dp, bclk_rate, out_scr_div, &m, &n);
689 } else {
690 ret = dai_ssp_setup_initial_bclk_mn_source(dp, bclk_rate, out_scr_div, &m, &n);
691 }
692
693 if (ret >= 0) {
694 mp->bclk_sources[dai_index] = MN_BCLK_SOURCE_MN;
695
696 LOG_INF("bclk_rate %d, *out_scr_div %d, m %d, n %d", bclk_rate, *out_scr_div, m, n);
697
698 sys_write32(m, dai_mn_base(dp) + MN_MDIV_M_VAL(dai_index));
699 sys_write32(n, dai_mn_base(dp) + MN_MDIV_N_VAL(dai_index));
700 }
701
702 out:
703
704 k_spin_unlock(&mp->lock, key);
705
706 return ret;
707 }
708
709 static void dai_ssp_mn_release_bclk(struct dai_intel_ssp *dp, uint32_t ssp_index)
710 {
711 struct dai_intel_ssp_mn *mp = dai_get_mn(dp);
712 k_spinlock_key_t key;
713 bool mn_in_use;
714
715 key = k_spin_lock(&mp->lock);
716 mp->bclk_sources[ssp_index] = MN_BCLK_SOURCE_NONE;
717
718 mn_in_use = dai_ssp_is_bclk_source_in_use(dp, MN_BCLK_SOURCE_MN);
719 /* release the M/N clock source if not used */
720 if (!mn_in_use) {
721 dai_ssp_reset_bclk_mn_source(dp);
722 }
723
724 k_spin_unlock(&mp->lock, key);
725 }
726
727 static void dai_ssp_mn_reset_bclk_divider(struct dai_intel_ssp *dp, uint32_t ssp_index)
728 {
729 struct dai_intel_ssp_mn *mp = dai_get_mn(dp);
730 k_spinlock_key_t key;
731
732 key = k_spin_lock(&mp->lock);
733
734 sys_write32(1, dai_mn_base(dp) + MN_MDIV_M_VAL(ssp_index));
735 sys_write32(1, dai_mn_base(dp) + MN_MDIV_N_VAL(ssp_index));
736
737 k_spin_unlock(&mp->lock, key);
738 }
739 #endif
740
741 static int dai_ssp_poll_for_register_delay(uint32_t reg, uint32_t mask,
742 uint32_t val, uint64_t us)
743 {
744 if (!WAIT_FOR((sys_read32(reg) & mask) == val, us, k_busy_wait(1))) {
745 LOG_ERR("poll timeout reg %u mask %u val %u us %u", reg, mask, val, (uint32_t)us);
746 return -EIO;
747 }
748
749 return 0;
750 }
751
752 static inline void dai_ssp_pm_runtime_dis_ssp_clk_gating(struct dai_intel_ssp *dp,
753 uint32_t ssp_index)
754 {
755 #if CONFIG_DAI_SSP_CLK_FORCE_DYNAMIC_CLOCK_GATING
756 uint32_t shim_reg;
757
758 shim_reg = sys_read32(dai_shim_base(dp) + SHIM_CLKCTL) |
759 (ssp_index < CONFIG_DAI_INTEL_SSP_NUM_BASE ?
760 SHIM_CLKCTL_I2SFDCGB(ssp_index) :
761 SHIM_CLKCTL_I2SEFDCGB(ssp_index -
762 CONFIG_DAI_INTEL_SSP_NUM_BASE));
763
764 sys_write32(shim_reg, dai_shim_base(dp) + SHIM_CLKCTL);
765
766 LOG_INF("ssp_index %d CLKCTL %08x", ssp_index, shim_reg);
767 #endif
768 }
769
770 static inline void dai_ssp_pm_runtime_en_ssp_clk_gating(struct dai_intel_ssp *dp,
771 uint32_t ssp_index)
772 {
773 #if CONFIG_DAI_SSP_CLK_FORCE_DYNAMIC_CLOCK_GATING
774 uint32_t shim_reg;
775
776 shim_reg = sys_read32(dai_shim_base(dp) + SHIM_CLKCTL) &
777 ~(ssp_index < CONFIG_DAI_INTEL_SSP_NUM_BASE ?
778 SHIM_CLKCTL_I2SFDCGB(ssp_index) :
779 SHIM_CLKCTL_I2SEFDCGB(ssp_index -
780 CONFIG_DAI_INTEL_SSP_NUM_BASE));
781
782 sys_write32(shim_reg, dai_shim_base(dp) + SHIM_CLKCTL);
783
784 LOG_INF("ssp_index %d CLKCTL %08x", ssp_index, shim_reg);
785 #endif
786 }
787
788 static void dai_ssp_pm_runtime_en_ssp_power(struct dai_intel_ssp *dp, uint32_t ssp_index)
789 {
790 #if CONFIG_DAI_SSP_HAS_POWER_CONTROL
791 int ret;
792
793 LOG_INF("SSP%d", ssp_index);
794 #if CONFIG_SOC_INTEL_ACE15_MTPM || CONFIG_SOC_SERIES_INTEL_ADSP_CAVS
795 sys_write32(sys_read32(dai_ip_base(dp) + I2SLCTL_OFFSET) | I2SLCTL_SPA(ssp_index),
796 dai_ip_base(dp) + I2SLCTL_OFFSET);
797
798 /* Check if powered on. */
799 ret = dai_ssp_poll_for_register_delay(dai_ip_base(dp) + I2SLCTL_OFFSET,
800 I2SLCTL_CPA(ssp_index), I2SLCTL_CPA(ssp_index),
801 DAI_INTEL_SSP_MAX_SEND_TIME_PER_SAMPLE);
802 #elif CONFIG_SOC_INTEL_ACE20_LNL || CONFIG_SOC_INTEL_ACE30_PTL
803 sys_write32(sys_read32(dai_hdamlssp_base(dp) + I2SLCTL_OFFSET) |
804 I2SLCTL_SPA(ssp_index),
805 dai_hdamlssp_base(dp) + I2SLCTL_OFFSET);
806 /* Check if powered on. */
807 ret = dai_ssp_poll_for_register_delay(dai_hdamlssp_base(dp) + I2SLCTL_OFFSET,
808 I2SLCTL_CPA(ssp_index), I2SLCTL_CPA(ssp_index),
809 DAI_INTEL_SSP_MAX_SEND_TIME_PER_SAMPLE);
810 #else
811 #error need to define SOC
812 #endif
813 if (ret) {
814 LOG_WRN("SSP%d: timeout", ssp_index);
815 }
816 #else
817 ARG_UNUSED(dp);
818 ARG_UNUSED(ssp_index);
819 #endif /* CONFIG_DAI_SSP_HAS_POWER_CONTROL */
820 }
821
822 static void dai_ssp_pm_runtime_dis_ssp_power(struct dai_intel_ssp *dp, uint32_t ssp_index)
823 {
824 #if CONFIG_DAI_SSP_HAS_POWER_CONTROL
825 int ret;
826
827 LOG_INF("SSP%d", ssp_index);
828 #if CONFIG_SOC_INTEL_ACE15_MTPM || CONFIG_SOC_SERIES_INTEL_ADSP_CAVS
829 sys_write32(sys_read32(dai_ip_base(dp) + I2SLCTL_OFFSET) & (~I2SLCTL_SPA(ssp_index)),
830 dai_ip_base(dp) + I2SLCTL_OFFSET);
831
832 /* Check if powered off. */
833 ret = dai_ssp_poll_for_register_delay(dai_ip_base(dp) + I2SLCTL_OFFSET,
834 I2SLCTL_CPA(ssp_index), 0,
835 DAI_INTEL_SSP_MAX_SEND_TIME_PER_SAMPLE);
836
837 #elif CONFIG_SOC_INTEL_ACE20_LNL || CONFIG_SOC_INTEL_ACE30_PTL
838 sys_write32(sys_read32(dai_hdamlssp_base(dp) + I2SLCTL_OFFSET) & (~I2SLCTL_SPA(ssp_index)),
839 dai_hdamlssp_base(dp) + I2SLCTL_OFFSET);
840
841 /* Check if powered off. */
842 ret = dai_ssp_poll_for_register_delay(dai_hdamlssp_base(dp) + I2SLCTL_OFFSET,
843 I2SLCTL_CPA(ssp_index), 0,
844 DAI_INTEL_SSP_MAX_SEND_TIME_PER_SAMPLE);
845 #else
846 #error need to define SOC
847 #endif
848 if (ret) {
849 LOG_WRN("SSP%d: timeout", ssp_index);
850 }
851 #else
852 ARG_UNUSED(dp);
853 ARG_UNUSED(ssp_index);
854 #endif /* CONFIG_DAI_SSP_HAS_POWER_CONTROL */
855 }
856
857 static void dai_ssp_program_channel_map(struct dai_intel_ssp *dp,
858 const struct dai_config *cfg, uint32_t ssp_index, const void *spec_config)
859 {
860 #if defined(CONFIG_SOC_INTEL_ACE20_LNL)
861 ARG_UNUSED(spec_config);
862 uint16_t pcmsycm = cfg->link_config;
863 /* Set upper slot number from configuration */
864 pcmsycm = pcmsycm | (dp->ssp_plat_data->params.tdm_slots - 1) << 4;
865
866 if (DAI_INTEL_SSP_IS_BIT_SET(cfg->link_config, 15)) {
867 uint32_t reg_add = dai_ip_base(dp) + 0x1000 * ssp_index + PCMS0CM_OFFSET;
868 /* Program HDA output stream parameters */
869 sys_write16((pcmsycm & 0xffff), reg_add);
870 } else {
871 uint32_t reg_add = dai_ip_base(dp) + 0x1000 * ssp_index + PCMS1CM_OFFSET;
872 /* Program HDA input stream parameters */
873 sys_write16((pcmsycm & 0xffff), reg_add);
874 }
875 #elif defined(CONFIG_SOC_INTEL_ACE30_PTL)
876 const struct dai_intel_ipc4_ssp_configuration_blob *blob = spec_config;
877 uint64_t time_slot_map = 0;
878 uint16_t pcmsycm = cfg->link_config;
879 uint8_t slot_count = 0;
880
881 if (DAI_INTEL_SSP_IS_BIT_SET(cfg->link_config, 15)) {
882 time_slot_map =
883 blob->i2s_driver_config.i2s_config.ssmidytsa[cfg->tdm_slot_group];
884 slot_count = POPCOUNT(time_slot_map >> 32) + POPCOUNT(time_slot_map & 0xFFFFFFFF);
885 pcmsycm = cfg->link_config | (slot_count - 1) << 4;
886 uint32_t reg_add = dai_ip_base(dp) + 0x1000 * ssp_index +
887 PCMSyCM_OFFSET(cfg->tdm_slot_group);
888
889 /* Program HDA output stream parameters */
890 sys_write16((pcmsycm & 0xffff), reg_add);
891
892 } else {
893 time_slot_map =
894 blob->i2s_driver_config.i2s_config.ssmodytsa[cfg->tdm_slot_group];
895 slot_count = POPCOUNT(time_slot_map >> 32) + POPCOUNT(time_slot_map & 0xFFFFFFFF);
896 pcmsycm = cfg->link_config | (slot_count - 1) << 4;
897 uint32_t reg_add = dai_ip_base(dp) + 0x1000 * ssp_index +
898 PCMSyCM_OFFSET(cfg->tdm_slot_group + I2SOPCMC);
899
900 /* Program HDA input stream parameters */
901 sys_write16((pcmsycm & 0xffff), reg_add);
902 }
903 #else
904 ARG_UNUSED(dp);
905 ARG_UNUSED(cfg);
906 ARG_UNUSED(ssp_index);
907 ARG_UNUSED(spec_config);
908 #endif /* CONFIG_SOC_INTEL_ACE20_LNL */
909 }
910
911 /* empty SSP transmit FIFO */
912 static void dai_ssp_empty_tx_fifo(struct dai_intel_ssp *dp)
913 {
914 int ret;
915 uint32_t sssr;
916
917 /*
918 * SSSR_TNF is cleared when TX FIFO is empty or full,
919 * so wait for TNF to be set, then for TFL to reach zero - order matters.
920 */
921 #ifdef CONFIG_SOC_INTEL_ACE30_PTL
922 ret = dai_ssp_poll_for_register_delay(dai_base(dp) + SSMODyCS(dp->tdm_slot_group),
923 SSMODyCS_TNF, SSMODyCS_TNF,
924 DAI_INTEL_SSP_MAX_SEND_TIME_PER_SAMPLE);
925
926 ret |= dai_ssp_poll_for_register_delay(dai_base(dp) + SSMODyCS(dp->tdm_slot_group),
927 SSMODyCS_TFL, 0,
928 DAI_INTEL_SSP_MAX_SEND_TIME_PER_SAMPLE *
929 (DAI_INTEL_SSP_FIFO_DEPTH - 1) / 2);
930 #else
931 ret = dai_ssp_poll_for_register_delay(dai_base(dp) + SSSR, SSSR_TNF, SSSR_TNF,
932 DAI_INTEL_SSP_MAX_SEND_TIME_PER_SAMPLE);
933 ret |= dai_ssp_poll_for_register_delay(dai_base(dp) + SSCR3, SSCR3_TFL_MASK, 0,
934 DAI_INTEL_SSP_MAX_SEND_TIME_PER_SAMPLE *
935 (DAI_INTEL_SSP_FIFO_DEPTH - 1) / 2);
936 #endif
937
938 if (ret) {
939 LOG_WRN("timeout");
940 }
941
942 sssr = sys_read32(dai_base(dp) + SSSR);
943
944 /* clear interrupt */
945 if (sssr & SSSR_TUR) {
946 sys_write32(sssr, dai_base(dp) + SSSR);
947 }
948 }
949
950 #ifdef CONFIG_SOC_INTEL_ACE30_PTL
951 static void ssp_empty_rx_fifo_on_start(struct dai_intel_ssp *dp)
952 {
953 uint32_t retry = DAI_INTEL_SSP_RX_FLUSH_RETRY_MAX;
954 uint32_t i, sssr;
955
956 sssr = sys_read32(dai_base(dp) + SSSR);
957
958 if (sssr & SSSR_ROR) {
959 /* The RX FIFO is in overflow condition, empty it */
960 for (uint32_t idx = 0; idx < I2SIPCMC; ++idx) {
961 for (i = 0; i < DAI_INTEL_SSP_FIFO_DEPTH; i++)
962 sys_read32(dai_base(dp) + SSMIDyD(idx));
963 }
964
965 /* Clear the overflow status */
966 dai_ssp_update_bits(dp, SSSR, SSSR_ROR, SSSR_ROR);
967 /* Re-read the SSSR register */
968 sssr = sys_read32(dai_base(dp) + SSSR);
969 }
970
971 for (uint32_t idx = 0; idx < I2SIPCMC; ++idx) {
972 while ((sys_read32(dai_base(dp) + SSMIDyCS(idx)) & SSMIDyCS_RNE) && retry--) {
973 uint32_t entries = SSMIDyCS_RFL_VAL(sys_read32(dai_base(dp) +
974 SSMIDyCS(idx)));
975
976 /* Empty the RX FIFO (the DMA is not running at this point) */
977 for (i = 0; i < entries + 1; i++)
978 sys_read32(dai_base(dp) + SSMIDyD(idx));
979
980 sssr = sys_read32(dai_base(dp) + SSSR);
981 }
982 }
983 }
984
985 static void ssp_empty_rx_fifo_on_stop(struct dai_intel_ssp *dp)
986 {
987 struct dai_intel_ssp_plat_data *ssp_plat_data = dai_get_plat_data(dp);
988 uint64_t sample_ticks = ssp_plat_data->params.fsync_rate ?
989 1000000 / ssp_plat_data->params.fsync_rate : 0;
990 uint32_t retry = DAI_INTEL_SSP_RX_FLUSH_RETRY_MAX;
991 uint32_t i, sssr, ssmidycs;
992 uint32_t entries[2];
993
994 sssr = sys_read32(dai_base(dp) + SSSR);
995
996 entries[0] = SSMIDyCS_RFL_VAL(sys_read32(dai_base(dp) + SSMIDyCS(dp->tdm_slot_group)));
997
998 while ((sys_read32(dai_base(dp) + SSMIDyCS(dp->tdm_slot_group)) &
999 SSMIDyCS_RNE) && retry--) {
1000 /* Wait one sample time */
1001 k_busy_wait(sample_ticks);
1002
1003 entries[1] = SSMIDyCS_RFL_VAL(sys_read32(dai_base(dp) +
1004 SSMIDyCS(dp->tdm_slot_group)));
1005 sssr = sys_read32(dai_base(dp) + SSSR);
1006 ssmidycs = sys_read32(dai_base(dp) + SSMIDyCS(dp->tdm_slot_group));
1007
1008 if (entries[0] > entries[1]) {
1009 /*
1010 * The DMA is reading the FIFO, check the status in the
1011 * next loop
1012 */
1013 entries[0] = entries[1];
1014 } else if (!(ssmidycs & SSMIDyCS_RFS)) {
1015 /*
1016 * The DMA request is not asserted, read the FIFO
1017 * directly, otherwise let the next loop iteration
1018 * check the status
1019 */
1020 for (i = 0; i < entries[1] + 1; i++)
1021 sys_read32(dai_base(dp) + SSMIDyD(dp->tdm_slot_group));
1022 }
1023 sssr = sys_read32(dai_base(dp) + SSSR);
1024 }
1025
1026 /* Just in case clear the overflow status */
1027 dai_ssp_update_bits(dp, SSSR, SSSR_ROR, SSSR_ROR);
1028 }
1029
1030 #else
1031 static void ssp_empty_rx_fifo_on_start(struct dai_intel_ssp *dp)
1032 {
1033 uint32_t retry = DAI_INTEL_SSP_RX_FLUSH_RETRY_MAX;
1034 uint32_t i, sssr;
1035
1036 sssr = sys_read32(dai_base(dp) + SSSR);
1037
1038 if (sssr & SSSR_ROR) {
1039 /* The RX FIFO is in overflow condition, empty it */
1040 for (i = 0; i < DAI_INTEL_SSP_FIFO_DEPTH; i++)
1041 sys_read32(dai_base(dp) + SSDR);
1042
1043 /* Clear the overflow status */
1044 dai_ssp_update_bits(dp, SSSR, SSSR_ROR, SSSR_ROR);
1045 /* Re-read the SSSR register */
1046 sssr = sys_read32(dai_base(dp) + SSSR);
1047 }
1048
1049 while ((sssr & SSSR_RNE) && retry--) {
1050 uint32_t entries = SSCR3_RFL_VAL(sys_read32(dai_base(dp) + SSCR3));
1051
1052 /* Empty the RX FIFO (the DMA is not running at this point) */
1053 for (i = 0; i < entries + 1; i++)
1054 sys_read32(dai_base(dp) + SSDR);
1055
1056 sssr = sys_read32(dai_base(dp) + SSSR);
1057 }
1058 }
1059
1060 static void ssp_empty_rx_fifo_on_stop(struct dai_intel_ssp *dp)
1061 {
1062 struct dai_intel_ssp_plat_data *ssp_plat_data = dai_get_plat_data(dp);
1063 uint64_t sample_ticks = ssp_plat_data->params.fsync_rate ?
1064 1000000 / ssp_plat_data->params.fsync_rate : 0;
1065 uint32_t retry = DAI_INTEL_SSP_RX_FLUSH_RETRY_MAX;
1066 uint32_t entries[2];
1067 uint32_t i, sssr;
1068
1069 sssr = sys_read32(dai_base(dp) + SSSR);
1070 entries[0] = SSCR3_RFL_VAL(sys_read32(dai_base(dp) + SSCR3));
1071
1072 while ((sssr & SSSR_RNE) && retry--) {
1073 /* Wait one sample time */
1074 k_busy_wait(sample_ticks);
1075
1076 entries[1] = SSCR3_RFL_VAL(sys_read32(dai_base(dp) + SSCR3));
1077 sssr = sys_read32(dai_base(dp) + SSSR);
1078
1079 if (entries[0] > entries[1]) {
1080 /*
1081 * The DMA is reading the FIFO, check the status in the
1082 * next loop
1083 */
1084 entries[0] = entries[1];
1085 } else if (!(sssr & SSSR_RFS)) {
1086 /*
1087 * The DMA request is not asserted, read the FIFO
1088 * directly, otherwise let the next loop iteration
1089 * check the status
1090 */
1091 for (i = 0; i < entries[1] + 1; i++)
1092 sys_read32(dai_base(dp) + SSDR);
1093 }
1094
1095 sssr = sys_read32(dai_base(dp) + SSSR);
1096 }
1097
1098 /* Just in case clear the overflow status */
1099 dai_ssp_update_bits(dp, SSSR, SSSR_ROR, SSSR_ROR);
1100 }
1101
1102 #endif
1103
1104 static int dai_ssp_mclk_prepare_enable(struct dai_intel_ssp *dp)
1105 {
1106 struct dai_intel_ssp_plat_data *ssp_plat_data = dai_get_plat_data(dp);
1107 int ret;
1108
1109 if (ssp_plat_data->clk_active & SSP_CLK_MCLK_ACTIVE) {
1110 return 0;
1111 }
1112
1113 /* MCLK config */
1114 ret = dai_ssp_mn_set_mclk(dp, ssp_plat_data->params.mclk_id,
1115 ssp_plat_data->params.mclk_rate);
1116 if (ret < 0) {
1117 LOG_ERR("invalid mclk_rate = %d for mclk_id = %d", ssp_plat_data->params.mclk_rate,
1118 ssp_plat_data->params.mclk_id);
1119 } else {
1120 ssp_plat_data->clk_active |= SSP_CLK_MCLK_ACTIVE;
1121 }
1122
1123 return ret;
1124 }
1125
1126 static void dai_ssp_mclk_disable_unprepare(struct dai_intel_ssp *dp)
1127 {
1128 struct dai_intel_ssp_plat_data *ssp_plat_data = dai_get_plat_data(dp);
1129
1130 if (!(ssp_plat_data->clk_active & SSP_CLK_MCLK_ACTIVE)) {
1131 return;
1132 }
1133
1134 dai_ssp_mn_release_mclk(dp, ssp_plat_data->params.mclk_id);
1135
1136 ssp_plat_data->clk_active &= ~SSP_CLK_MCLK_ACTIVE;
1137 }
1138
1139 static int dai_ssp_bclk_prepare_enable(struct dai_intel_ssp *dp)
1140 {
1141 #if !(CONFIG_INTEL_MN)
1142 struct dai_intel_ssp_freq_table *ft = dai_get_ftable(dp);
1143 #endif
1144 struct dai_intel_ssp_plat_data *ssp_plat_data = dai_get_plat_data(dp);
1145 uint32_t sscr0;
1146 uint32_t mdiv;
1147 bool need_ecs = false;
1148 int ret = 0;
1149
1150 if (ssp_plat_data->clk_active & SSP_CLK_BCLK_ACTIVE) {
1151 return 0;
1152 }
1153
1154 sscr0 = sys_read32(dai_base(dp) + SSCR0);
1155
1156 #if CONFIG_INTEL_MN
1157 /* BCLK config */
1158 ret = dai_ssp_mn_set_bclk(dp, dp->dai_index, ssp_plat_data->params.bclk_rate,
1159 &mdiv, &need_ecs);
1160 if (ret < 0) {
1161 LOG_ERR("invalid bclk_rate = %d for ssp_index = %d",
1162 ssp_plat_data->params.bclk_rate, dp->dai_index);
1163 goto out;
1164 }
1165 #else
1166 if (ft[DAI_INTEL_SSP_DEFAULT_IDX].freq % ssp_plat_data->params.bclk_rate != 0) {
1167 LOG_ERR("invalid bclk_rate = %d for ssp_index = %d",
1168 ssp_plat_data->params.bclk_rate, dp->dai_index);
1169 ret = -EINVAL;
1170 goto out;
1171 }
1172
1173 mdiv = ft[DAI_INTEL_SSP_DEFAULT_IDX].freq / ssp_plat_data->params.bclk_rate;
1174 #endif
1175
1176 #ifndef CONFIG_SOC_INTEL_ACE30_PTL
1177 if (need_ecs) {
1178 sscr0 |= SSCR0_ECS;
1179 }
1180 #endif
1181
1182 /* clock divisor is SCR + 1 */
1183 mdiv -= 1;
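/*
 * Example: with mdiv = 12 (e.g. a 38.4 MHz source divided down to a
 * 3.2 MHz BCLK - illustrative numbers), the register encodes divide-by-12
 * as SCR = 11.
 */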
1184
1185 /* divisor must be within SCR range */
1186 if (mdiv > (SSCR0_SCR_MASK >> 8)) {
1187 LOG_ERR("divisor %d is not within SCR range", mdiv);
1188 ret = -EINVAL;
1189 goto out;
1190 }
1191
1192 /* set the SCR divisor */
1193 sscr0 &= ~SSCR0_SCR_MASK;
1194 sscr0 |= SSCR0_SCR(mdiv);
1195
1196 sys_write32(sscr0, dai_base(dp) + SSCR0);
1197
1198 LOG_INF("sscr0 = 0x%08x", sscr0);
1199 out:
1200 if (!ret) {
1201 ssp_plat_data->clk_active |= SSP_CLK_BCLK_ACTIVE;
1202 }
1203
1204 return ret;
1205 }
1206
1207 static void dai_ssp_bclk_disable_unprepare(struct dai_intel_ssp *dp)
1208 {
1209 struct dai_intel_ssp_plat_data *ssp_plat_data = dai_get_plat_data(dp);
1210
1211 if (!(ssp_plat_data->clk_active & SSP_CLK_BCLK_ACTIVE)) {
1212 return;
1213 }
1214 #if CONFIG_INTEL_MN
1215 dai_ssp_mn_release_bclk(dp, ssp_plat_data->ssp_index);
1216 #endif
1217 ssp_plat_data->clk_active &= ~SSP_CLK_BCLK_ACTIVE;
1218 }
1219
1220 static void dai_ssp_log_ssp_data(struct dai_intel_ssp *dp)
1221 {
1222 LOG_INF("dai index: %u", dp->dai_index);
1223 LOG_INF("plat_data base: %u", dp->ssp_plat_data->base);
1224 LOG_INF("plat_data irq: %u", dp->ssp_plat_data->irq);
1225 LOG_INF("plat_data fifo playback offset: %u",
1226 dp->ssp_plat_data->fifo[DAI_DIR_PLAYBACK].offset);
1227 LOG_INF("plat_data fifo playback handshake: %u",
1228 dp->ssp_plat_data->fifo[DAI_DIR_PLAYBACK].handshake);
1229 LOG_INF("plat_data fifo capture offset: %u",
1230 dp->ssp_plat_data->fifo[DAI_DIR_CAPTURE].offset);
1231 LOG_INF("plat_data fifo capture handshake: %u",
1232 dp->ssp_plat_data->fifo[DAI_DIR_CAPTURE].handshake);
1233 }
1234
1235 /* Digital Audio interface formatting */
1236 static int dai_ssp_set_config_tplg(struct dai_intel_ssp *dp, const struct dai_config *config,
1237 const void *bespoke_cfg)
1238 {
1239 struct dai_intel_ssp_pdata *ssp = dai_get_drvdata(dp);
1240 struct dai_intel_ssp_plat_data *ssp_plat_data = dai_get_plat_data(dp);
1241 struct dai_intel_ssp_freq_table *ft = dai_get_ftable(dp);
1242 uint32_t sscr0;
1243 uint32_t sscr1;
1244 uint32_t sscr2;
1245 uint32_t sscr3;
1246 uint32_t sspsp;
1247 uint32_t sspsp2;
1248 uint32_t sstsa;
1249 uint32_t ssrsa;
1250 uint32_t ssto;
1251 uint32_t ssioc;
1252 uint32_t bdiv;
1253 uint32_t data_size;
1254 uint32_t frame_end_padding;
1255 uint32_t slot_end_padding;
1256 uint32_t frame_len = 0;
1257 uint32_t bdiv_min;
1258 uint32_t tft;
1259 uint32_t rft;
1260 uint32_t active_tx_slots = 2;
1261 uint32_t active_rx_slots = 2;
1262 uint32_t sample_width = 2;
1263
1264 bool inverted_bclk = false;
1265 bool inverted_frame = false;
1266 bool cfs = false;
1267 bool start_delay = false;
1268 k_spinlock_key_t key;
1269 int ret = 0;
1270
1271 dai_ssp_log_ssp_data(dp);
1272
1273 key = k_spin_lock(&dp->lock);
1274
1275 /* ignore config if SSP is already configured */
1276 if (dp->state[DAI_DIR_PLAYBACK] > DAI_STATE_READY ||
1277 dp->state[DAI_DIR_CAPTURE] > DAI_STATE_READY) {
1278 if (!memcmp(&ssp_plat_data->params, bespoke_cfg,
1279 sizeof(struct dai_intel_ipc3_ssp_params))) {
1280 LOG_INF("Already configured. Ignore config");
1281 goto clk;
1282 }
1283
1284 if (ssp_plat_data->clk_active &
1285 (SSP_CLK_MCLK_ACTIVE | SSP_CLK_BCLK_ACTIVE)) {
1286 LOG_WRN("SSP active, cannot change config");
1287 goto clk;
1288 }
1289
1290 /* safe to proceed and change HW config */
1291 }
1292
1293 LOG_INF("SSP%d", dp->dai_index);
1294
1295 /* reset SSP settings */
1296 /* sscr0 dynamic settings are DSS, EDSS, SCR, FRDC, ECS */
1297 /*
1298 * FIXME: MOD, ACS, NCS are not set,
1299 * no support for network mode for now
1300 */
1301 sscr0 = SSCR0_PSP | SSCR0_RIM | SSCR0_TIM;
1302
1303 /* sscr1 dynamic settings are SFRMDIR, SCLKDIR, SCFR, RSRE, TSRE */
1304 sscr1 = SSCR1_TTE | SSCR1_TTELP | SSCR1_TRAIL;
1305
1306 /* sscr2 dynamic setting is LJDFD */
1307 sscr2 = SSCR2_SDFD | SSCR2_TURM1;
1308
1309 /* sscr3 dynamic settings are TFT, RFT */
1310 sscr3 = 0;
1311
1312 /* sspsp dynamic settings are SCMODE, SFRMP, DMYSTRT, SFRMWDTH */
1313 sspsp = 0;
1314
1315 ssp->config = *config;
1316 memcpy(&ssp_plat_data->params, bespoke_cfg, sizeof(struct dai_intel_ipc3_ssp_params));
1317
1318 /* sspsp2 no dynamic setting */
1319 sspsp2 = 0x0;
1320
1321 /* ssioc dynamic setting is SFCR */
1322 ssioc = SSIOC_SCOE;
1323
1324 /* ssto no dynamic setting */
1325 ssto = 0x0;
1326
1327 /* sstsa dynamic setting is TTSA, default 2 slots */
1328 sstsa = SSTSA_SSTSA(ssp_plat_data->params.tx_slots);
1329
1330 /* ssrsa dynamic setting is RTSA, default 2 slots */
1331 ssrsa = SSRSA_SSRSA(ssp_plat_data->params.rx_slots);
1332
1333 switch (config->format & DAI_INTEL_IPC3_SSP_FMT_CLOCK_PROVIDER_MASK) {
1334 case DAI_INTEL_IPC3_SSP_FMT_CBP_CFP:
1335 sscr1 |= SSCR1_SCLKDIR | SSCR1_SFRMDIR;
1336 break;
1337 case DAI_INTEL_IPC3_SSP_FMT_CBC_CFC:
1338 sscr1 |= SSCR1_SCFR;
1339 cfs = true;
1340 break;
1341 case DAI_INTEL_IPC3_SSP_FMT_CBP_CFC:
1342 sscr1 |= SSCR1_SCLKDIR;
1343 /* FIXME: this mode has not been tested */
1344
1345 cfs = true;
1346 break;
1347 case DAI_INTEL_IPC3_SSP_FMT_CBC_CFP:
1348 sscr1 |= SSCR1_SCFR | SSCR1_SFRMDIR;
1349 /* FIXME: this mode has not been tested */
1350 break;
1351 default:
1352 LOG_ERR("format & PROVIDER_MASK EINVAL");
1353 ret = -EINVAL;
1354 goto out;
1355 }
1356
1357 /* clock signal polarity */
1358 switch (config->format & DAI_INTEL_IPC3_SSP_FMT_INV_MASK) {
1359 case DAI_INTEL_IPC3_SSP_FMT_NB_NF:
1360 break;
1361 case DAI_INTEL_IPC3_SSP_FMT_NB_IF:
1362 inverted_frame = true; /* handled later with format */
1363 break;
1364 case DAI_INTEL_IPC3_SSP_FMT_IB_IF:
1365 inverted_bclk = true; /* handled later with bclk idle */
1366 inverted_frame = true; /* handled later with format */
1367 break;
1368 case DAI_INTEL_IPC3_SSP_FMT_IB_NF:
1369 inverted_bclk = true; /* handled later with bclk idle */
1370 break;
1371 default:
1372 LOG_ERR("format & INV_MASK EINVAL");
1373 ret = -EINVAL;
1374 goto out;
1375 }
1376
1377 /* supporting bclk idle state */
1378 if (ssp_plat_data->params.clks_control & DAI_INTEL_IPC3_SSP_CLKCTRL_BCLK_IDLE_HIGH) {
1379 /* bclk idle state high */
1380 sspsp |= SSPSP_SCMODE((inverted_bclk ^ 0x3) & 0x3);
1381 } else {
1382 /* bclk idle state low */
1383 sspsp |= SSPSP_SCMODE(inverted_bclk);
1384 }
1385
1386 sscr0 |= SSCR0_MOD | SSCR0_ACS;
1387
1388 /* Additional hardware settings */
1389
1390 /* Receiver Time-out Interrupt Disabled/Enabled */
1391 sscr1 |= (ssp_plat_data->params.quirks & DAI_INTEL_IPC3_SSP_QUIRK_TINTE) ?
1392 SSCR1_TINTE : 0;
1393
1394 /* Peripheral Trailing Byte Interrupts Disable/Enable */
1395 sscr1 |= (ssp_plat_data->params.quirks & DAI_INTEL_IPC3_SSP_QUIRK_PINTE) ?
1396 SSCR1_PINTE : 0;
1397
1398 /* Enable/disable internal loopback. Output of transmit serial
1399 * shifter connected to input of receive serial shifter, internally.
1400 */
1401 sscr1 |= (ssp_plat_data->params.quirks & DAI_INTEL_IPC3_SSP_QUIRK_LBM) ?
1402 SSCR1_LBM : 0;
1403
1404 if (ssp_plat_data->params.quirks & DAI_INTEL_IPC3_SSP_QUIRK_LBM) {
1405 LOG_INF("going for loopback!");
1406 } else {
1407 LOG_INF("no loopback!");
1408 }
1409
1410 /* Transmit data are driven at the same/opposite clock edge specified
1411 * in SSPSP.SCMODE[1:0]
1412 */
1413 sscr2 |= (ssp_plat_data->params.quirks & DAI_INTEL_IPC3_SSP_QUIRK_SMTATF) ?
1414 SSCR2_SMTATF : 0;
1415
1416 /* Receive data are sampled at the same/opposite clock edge specified
1417 * in SSPSP.SCMODE[1:0]
1418 */
1419 sscr2 |= (ssp_plat_data->params.quirks & DAI_INTEL_IPC3_SSP_QUIRK_MMRATF) ?
1420 SSCR2_MMRATF : 0;
1421
1422 /* Enable/disable the fix for PSP consumer mode TXD wait for frame
1423 * de-assertion before starting the second channel
1424 */
1425 sscr2 |= (ssp_plat_data->params.quirks & DAI_INTEL_IPC3_SSP_QUIRK_PSPSTWFDFD) ?
1426 SSCR2_PSPSTWFDFD : 0;
1427
1428 /* Enable/disable the fix for PSP provider mode FSRT with dummy stop &
1429 * frame end padding capability
1430 */
1431 sscr2 |= (ssp_plat_data->params.quirks & DAI_INTEL_IPC3_SSP_QUIRK_PSPSRWFDFD) ?
1432 SSCR2_PSPSRWFDFD : 0;
1433
1434 if (!ssp_plat_data->params.mclk_rate ||
1435 ssp_plat_data->params.mclk_rate > ft[DAI_INTEL_SSP_MAX_FREQ_INDEX].freq) {
1436 LOG_ERR("invalid MCLK = %d Hz (valid < %d)", ssp_plat_data->params.mclk_rate,
1437 ft[DAI_INTEL_SSP_MAX_FREQ_INDEX].freq);
1438 ret = -EINVAL;
1439 goto out;
1440 }
1441
1442 if (!ssp_plat_data->params.bclk_rate ||
1443 ssp_plat_data->params.bclk_rate > ssp_plat_data->params.mclk_rate) {
1444 LOG_ERR("BCLK %d Hz = 0 or > MCLK %d Hz", ssp_plat_data->params.bclk_rate,
1445 ssp_plat_data->params.mclk_rate);
1446 ret = -EINVAL;
1447 goto out;
1448 }
1449
1450 /* calc frame width based on BCLK and rate - must be divisible */
1451 if (ssp_plat_data->params.bclk_rate % ssp_plat_data->params.fsync_rate) {
1452 LOG_ERR("BCLK %d is not divisible by rate %d", ssp_plat_data->params.bclk_rate,
1453 ssp_plat_data->params.fsync_rate);
1454 ret = -EINVAL;
1455 goto out;
1456 }
1457
1458 /* must be enough BCLKs for data */
1459 bdiv = ssp_plat_data->params.bclk_rate / ssp_plat_data->params.fsync_rate;
1460 if (bdiv < ssp_plat_data->params.tdm_slot_width * ssp_plat_data->params.tdm_slots) {
1461 LOG_ERR("not enough BCLKs need %d",
1462 ssp_plat_data->params.tdm_slot_width * ssp_plat_data->params.tdm_slots);
1463 ret = -EINVAL;
1464 goto out;
1465 }
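/*
 * Example (illustrative numbers): bclk_rate 3072000 / fsync_rate 48000
 * gives bdiv = 64 BCLKs per frame, which exactly covers 2 TDM slots of
 * 32 bits each, so the check above passes with no spare cycles.
 */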
1466
1467 /* tdm_slot_width must be <= 38 for SSP */
1468 if (ssp_plat_data->params.tdm_slot_width > 38) {
1469 LOG_ERR("tdm_slot_width %d > 38", ssp_plat_data->params.tdm_slot_width);
1470 ret = -EINVAL;
1471 goto out;
1472 }
1473
1474 bdiv_min = ssp_plat_data->params.tdm_slots *
1475 (ssp_plat_data->params.tdm_per_slot_padding_flag ?
1476 ssp_plat_data->params.tdm_slot_width : ssp_plat_data->params.sample_valid_bits);
1477 if (bdiv < bdiv_min) {
1478 LOG_ERR("bdiv(%d) < bdiv_min(%d)", bdiv, bdiv_min);
1479 ret = -EINVAL;
1480 goto out;
1481 }
1482
1483 frame_end_padding = bdiv - bdiv_min;
1484 if (frame_end_padding > SSPSP2_FEP_MASK) {
1485 LOG_ERR("frame_end_padding too big: %u", frame_end_padding);
1486 ret = -EINVAL;
1487 goto out;
1488 }
1489
1490 /* format */
1491 switch (config->format & DAI_INTEL_IPC3_SSP_FMT_FORMAT_MASK) {
1492 case DAI_INTEL_IPC3_SSP_FMT_I2S:
1493
1494 start_delay = true;
1495
1496 sscr0 |= SSCR0_FRDC(ssp_plat_data->params.tdm_slots);
1497
1498 if (bdiv % 2) {
1499 LOG_ERR("bdiv %d is not divisible by 2", bdiv);
1500 ret = -EINVAL;
1501 goto out;
1502 }
1503
1504 /* set asserted frame length to half frame length */
1505 frame_len = bdiv / 2;
1506
1507 /*
1508 * handle frame polarity, I2S default is falling/active low,
1509 * non-inverted(inverted_frame=0) -- active low(SFRMP=0),
1510 * inverted(inverted_frame=1) -- rising/active high(SFRMP=1),
1511 * so, we should set SFRMP to inverted_frame.
1512 */
1513 sspsp |= SSPSP_SFRMP(inverted_frame);
1514
1515 /*
1516 * for I2S/LEFT_J, the padding has to happen at the end
1517 * of each slot
1518 */
1519 if (frame_end_padding % 2) {
1520 LOG_ERR("frame_end_padding %d is not divisible by 2", frame_end_padding);
1521 ret = -EINVAL;
1522 goto out;
1523 }
1524
1525 slot_end_padding = frame_end_padding / 2;
1526
1527 if (slot_end_padding > DAI_INTEL_IPC3_SSP_SLOT_PADDING_MAX) {
1528 /* too big padding */
1529 LOG_ERR("slot_end_padding > %d", DAI_INTEL_IPC3_SSP_SLOT_PADDING_MAX);
1530 ret = -EINVAL;
1531 goto out;
1532 }
1533
1534 sspsp |= SSPSP_DMYSTOP(slot_end_padding);
1535 slot_end_padding >>= SSPSP_DMYSTOP_BITS;
1536 sspsp |= SSPSP_EDMYSTOP(slot_end_padding);
1537
1538 break;
1539
1540 case DAI_INTEL_IPC3_SSP_FMT_LEFT_J:
1541
1542 /* default start_delay value is set to false */
1543
1544 sscr0 |= SSCR0_FRDC(ssp_plat_data->params.tdm_slots);
1545
1546 /* LJDFD enable */
1547 sscr2 &= ~SSCR2_LJDFD;
1548
1549 if (bdiv % 2) {
1550 LOG_ERR("bdiv %d is not divisible by 2", bdiv);
1551 ret = -EINVAL;
1552 goto out;
1553 }
1554
1555 /* set asserted frame length to half frame length */
1556 frame_len = bdiv / 2;
1557
1558 /*
1559 * handle frame polarity, LEFT_J default is rising/active high,
1560 * non-inverted(inverted_frame=0) -- active high(SFRMP=1),
1561 * inverted(inverted_frame=1) -- falling/active low(SFRMP=0),
1562 * so, we should set SFRMP to !inverted_frame.
1563 */
1564 sspsp |= SSPSP_SFRMP(!inverted_frame);
1565
1566 /*
1567 * for I2S/LEFT_J, the padding has to happen at the end
1568 * of each slot
1569 */
1570 if (frame_end_padding % 2) {
1571 LOG_ERR("frame_end_padding %d is not divisible by 2", frame_end_padding);
1572 ret = -EINVAL;
1573 goto out;
1574 }
1575
1576 slot_end_padding = frame_end_padding / 2;
1577
1578 if (slot_end_padding > 15) {
1579 /* can't handle padding over 15 bits */
1580 LOG_ERR("slot_end_padding %d > 15 bits", slot_end_padding);
1581 ret = -EINVAL;
1582 goto out;
1583 }
1584
1585 sspsp |= SSPSP_DMYSTOP(slot_end_padding);
1586 slot_end_padding >>= SSPSP_DMYSTOP_BITS;
1587 sspsp |= SSPSP_EDMYSTOP(slot_end_padding);
1588
1589 break;
1590 case DAI_INTEL_IPC3_SSP_FMT_DSP_A:
1591
1592 start_delay = true;
1593
1594 /* fallthrough */
1595
1596 case DAI_INTEL_IPC3_SSP_FMT_DSP_B:
1597
1598 /* default start_delay value is set to false */
1599
1600 sscr0 |= SSCR0_MOD | SSCR0_FRDC(ssp_plat_data->params.tdm_slots);
1601
1602 /* set asserted frame length */
1603 frame_len = 1; /* default */
1604
1605 if (cfs && ssp_plat_data->params.frame_pulse_width > 0 &&
1606 ssp_plat_data->params.frame_pulse_width <=
1607 DAI_INTEL_IPC3_SSP_FRAME_PULSE_WIDTH_MAX) {
1608 frame_len = ssp_plat_data->params.frame_pulse_width;
1609 }
1610
1611 /* frame_pulse_width must be less than or equal to 38 */
1612 if (ssp_plat_data->params.frame_pulse_width >
1613 DAI_INTEL_IPC3_SSP_FRAME_PULSE_WIDTH_MAX) {
1614 LOG_ERR("frame_pulse_width > %d", DAI_INTEL_IPC3_SSP_FRAME_PULSE_WIDTH_MAX);
1615 ret = -EINVAL;
1616 goto out;
1617 }
1618 /*
1619 * handle frame polarity, DSP_B default is rising/active high,
1620 * non-inverted(inverted_frame=0) -- active high(SFRMP=1),
1621 * inverted(inverted_frame=1) -- falling/active low(SFRMP=0),
1622 * so, we should set SFRMP to !inverted_frame.
1623 */
1624 sspsp |= SSPSP_SFRMP(!inverted_frame);
1625
1626 active_tx_slots = POPCOUNT(ssp_plat_data->params.tx_slots);
1627 active_rx_slots = POPCOUNT(ssp_plat_data->params.rx_slots);
1628
1629 /*
1630 * handle TDM mode, TDM mode has padding at the end of
1631 * each slot. The amount of padding equals the slot width
1632 * minus the valid bits per slot.
1633 */
1634 if (ssp_plat_data->params.tdm_per_slot_padding_flag) {
1635 frame_end_padding = bdiv - ssp_plat_data->params.tdm_slots *
1636 ssp_plat_data->params.tdm_slot_width;
1637
1638 slot_end_padding = ssp_plat_data->params.tdm_slot_width -
1639 ssp_plat_data->params.sample_valid_bits;
1640
1641 if (slot_end_padding >
1642 DAI_INTEL_IPC3_SSP_SLOT_PADDING_MAX) {
1643 LOG_ERR("slot_end_padding > %d",
1644 DAI_INTEL_IPC3_SSP_SLOT_PADDING_MAX);
1645 ret = -EINVAL;
1646 goto out;
1647 }
1648
1649 sspsp |= SSPSP_DMYSTOP(slot_end_padding);
1650 slot_end_padding >>= SSPSP_DMYSTOP_BITS;
1651 sspsp |= SSPSP_EDMYSTOP(slot_end_padding);
1652 }
1653
1654 sspsp2 |= (frame_end_padding & SSPSP2_FEP_MASK);
1655
1656 break;
1657 default:
1658 LOG_ERR("invalid format 0x%04x", config->format);
1659 ret = -EINVAL;
1660 goto out;
1661 }
1662
1663 if (start_delay) {
1664 sspsp |= SSPSP_FSRT;
1665 }
1666
1667 sspsp |= SSPSP_SFRMWDTH(frame_len);
1668
1669 data_size = ssp_plat_data->params.sample_valid_bits;
1670
1671 if (data_size > 16) {
1672 sscr0 |= (SSCR0_EDSS | SSCR0_DSIZE(data_size - 16));
1673 } else {
1674 sscr0 |= SSCR0_DSIZE(data_size);
1675 }
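/*
 * Example: sample_valid_bits = 24 sets SSCR0_EDSS together with DSIZE(8)
 * (16 + 8 = 24 bit samples), while 16 programs DSIZE(16) with EDSS clear.
 */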
1676
1677 /* setting TFT and RFT */
1678 switch (ssp_plat_data->params.sample_valid_bits) {
1679 case 16:
1680 /* use 2 bytes for each slot */
1681 sample_width = 2;
1682 break;
1683 case 24:
1684 case 32:
1685 /* use 4 bytes for each slot */
1686 sample_width = 4;
1687 break;
1688 default:
1689 LOG_ERR("sample_valid_bits %d", ssp_plat_data->params.sample_valid_bits);
1690 ret = -EINVAL;
1691 goto out;
1692 }
1693
1694 tft = MIN(DAI_INTEL_SSP_FIFO_DEPTH - DAI_INTEL_SSP_FIFO_WATERMARK,
1695 sample_width * active_tx_slots);
1696 rft = MIN(DAI_INTEL_SSP_FIFO_DEPTH - DAI_INTEL_SSP_FIFO_WATERMARK,
1697 sample_width * active_rx_slots);
1698
1699 sscr3 |= SSCR3_TX(tft) | SSCR3_RX(rft);
1700
1701 sys_write32(sscr0, dai_base(dp) + SSCR0);
1702 sys_write32(sscr1, dai_base(dp) + SSCR1);
1703 sys_write32(sscr2, dai_base(dp) + SSCR2);
1704 sys_write32(sscr3, dai_base(dp) + SSCR3);
1705 sys_write32(sspsp, dai_base(dp) + SSPSP);
1706 sys_write32(sspsp2, dai_base(dp) + SSPSP2);
1707 sys_write32(ssioc, dai_base(dp) + SSIOC);
1708 sys_write32(ssto, dai_base(dp) + SSTO);
1709 #ifdef CONFIG_SOC_INTEL_ACE30_PTL
1710 for (uint32_t idx = 0; idx < I2SIPCMC; ++idx) {
1711 sys_write64(sstsa, dai_base(dp) + SSMODyTSA(idx));
1712 }
1713
1714 for (uint32_t idx = 0; idx < I2SOPCMC; ++idx) {
1715 sys_write64(ssrsa, dai_base(dp) + SSMIDyTSA(idx));
1716 }
1717 #else
1718 sys_write32(sstsa, dai_base(dp) + SSTSA);
1719 sys_write32(ssrsa, dai_base(dp) + SSRSA);
1720 #endif
1721
1722 LOG_INF("sscr0 = 0x%08x, sscr1 = 0x%08x, ssto = 0x%08x, sspsp = 0x%0x",
1723 sscr0, sscr1, ssto, sspsp);
1724 LOG_INF("sscr2 = 0x%08x, sspsp2 = 0x%08x, sscr3 = 0x%08x, ssioc = 0x%08x",
1725 sscr2, sspsp2, sscr3, ssioc);
1726 LOG_INF("ssrsa = 0x%08x, sstsa = 0x%08x",
1727 ssrsa, sstsa);
1728
1729 dp->state[DAI_DIR_PLAYBACK] = DAI_STATE_PRE_RUNNING;
1730 dp->state[DAI_DIR_CAPTURE] = DAI_STATE_PRE_RUNNING;
1731
1732 clk:
1733 switch (config->options & DAI_INTEL_IPC3_SSP_CONFIG_FLAGS_CMD_MASK) {
1734 case DAI_INTEL_IPC3_SSP_CONFIG_FLAGS_HW_PARAMS:
1735 if (ssp_plat_data->params.clks_control & DAI_INTEL_IPC3_SSP_CLKCTRL_MCLK_ES) {
1736 ret = dai_ssp_mclk_prepare_enable(dp);
1737 if (ret < 0) {
1738 goto out;
1739 }
1740
1741 ssp_plat_data->clk_active |= SSP_CLK_MCLK_ES_REQ;
1742
1743 LOG_INF("hw_params stage: enabled MCLK clocks for SSP%d...",
1744 dp->dai_index);
1745 }
1746
1747 if (ssp_plat_data->params.clks_control & DAI_INTEL_IPC3_SSP_CLKCTRL_BCLK_ES) {
1748 bool enable_sse = false;
1749
1750 if (!(ssp_plat_data->clk_active & SSP_CLK_BCLK_ACTIVE)) {
1751 enable_sse = true;
1752 }
1753
1754 ret = dai_ssp_bclk_prepare_enable(dp);
1755 if (ret < 0) {
1756 goto out;
1757 }
1758
1759 ssp_plat_data->clk_active |= SSP_CLK_BCLK_ES_REQ;
1760
1761 if (enable_sse) {
1762 #ifdef CONFIG_SOC_INTEL_ACE30_PTL
1763 dai_ssp_update_bits(dp, SSMIDyCS(dp->tdm_slot_group),
1764 SSMIDyCS_RSRE, SSMIDyCS_RSRE);
1765 dai_ssp_update_bits(dp, SSMODyCS(dp->tdm_slot_group),
1766 SSMODyCS_TSRE, SSMODyCS_TSRE);
1767 #endif
1768 /* enable port */
1769 dai_ssp_update_bits(dp, SSCR0, SSCR0_SSE, SSCR0_SSE);
1770
1771 LOG_INF("SSE set for SSP%d", ssp_plat_data->ssp_index);
1772 }
1773
1774 LOG_INF("hw_params stage: enabled BCLK clocks for SSP%d...",
1775 ssp_plat_data->ssp_index);
1776 }
1777 break;
1778 case DAI_INTEL_IPC3_SSP_CONFIG_FLAGS_HW_FREE:
1779 /* disable SSP port if no users */
1780 if (dp->state[DAI_DIR_CAPTURE] != DAI_STATE_PRE_RUNNING ||
1781 dp->state[DAI_DIR_PLAYBACK] != DAI_STATE_PRE_RUNNING) {
1782 LOG_INF("hw_free stage: ignore since SSP%d still in use",
1783 dp->dai_index);
1784 break;
1785 }
1786
1787 if (ssp_plat_data->params.clks_control & DAI_INTEL_IPC3_SSP_CLKCTRL_BCLK_ES) {
1788 LOG_INF("hw_free stage: releasing BCLK clocks for SSP%d...",
1789 dp->dai_index);
1790 if (ssp_plat_data->clk_active & SSP_CLK_BCLK_ACTIVE) {
1791 #ifdef CONFIG_SOC_INTEL_ACE30_PTL
1792 for (uint32_t idx = 0; idx < I2SOPCMC; ++idx) {
1793 dai_ssp_update_bits(dp, SSMODyCS(idx), SSMODyCS_TSRE, 0);
1794 }
1795
1796 for (uint32_t idx = 0; idx < I2SIPCMC; ++idx) {
1797 dai_ssp_update_bits(dp, SSMIDyCS(idx), SSMIDyCS_RSRE, 0);
1798 }
1799 #endif
1800 dai_ssp_update_bits(dp, SSCR0, SSCR0_SSE, 0);
1801 LOG_INF("SSE clear for SSP%d", dp->dai_index);
1802 }
1803 dai_ssp_bclk_disable_unprepare(dp);
1804 ssp_plat_data->clk_active &= ~SSP_CLK_BCLK_ES_REQ;
1805 }
1806 if (ssp_plat_data->params.clks_control & DAI_INTEL_IPC3_SSP_CLKCTRL_MCLK_ES) {
1807 LOG_INF("hw_free stage: releasing MCLK clocks for SSP%d...",
1808 dp->dai_index);
1809 dai_ssp_mclk_disable_unprepare(dp);
1810 ssp_plat_data->clk_active &= ~SSP_CLK_MCLK_ES_REQ;
1811 }
1812 break;
1813 default:
1814 break;
1815 }
1816 out:
1817
1818 k_spin_unlock(&dp->lock, key);
1819
1820 return ret;
1821 }
1822
dai_ssp_check_aux_data(struct ssp_intel_aux_tlv * aux_tlv,int aux_len,int parsed_len)1823 static int dai_ssp_check_aux_data(struct ssp_intel_aux_tlv *aux_tlv, int aux_len, int parsed_len)
1824 {
1825 struct ssp_intel_sync_ctl *sync;
1826 int size, size_left;
1827
1828 switch (aux_tlv->type) {
1829 case SSP_MN_DIVIDER_CONTROLS:
1830 size = sizeof(struct ssp_intel_mn_ctl);
1831 break;
1832 case SSP_DMA_CLK_CONTROLS:
1833 size = sizeof(struct ssp_intel_clk_ctl);
1834 break;
1835 case SSP_DMA_TRANSMISSION_START:
1836 case SSP_DMA_TRANSMISSION_STOP:
1837 size = sizeof(struct ssp_intel_tr_ctl);
break;
1838 case SSP_DMA_ALWAYS_RUNNING_MODE:
1839 size = sizeof(struct ssp_intel_run_ctl);
1840 break;
1841 case SSP_DMA_SYNC_DATA:
1842 size = sizeof(struct ssp_intel_sync_ctl);
1843 sync = (struct ssp_intel_sync_ctl *)&aux_tlv->val;
1844 size += sync->count * sizeof(struct ssp_intel_node_ctl);
1845 break;
1846 case SSP_DMA_CLK_CONTROLS_EXT:
1847 size = sizeof(struct ssp_intel_ext_ctl);
1848 break;
1849 case SSP_LINK_CLK_SOURCE:
1850 #ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
1851 size = sizeof(struct ssp_intel_link_ctl);
1852 break;
1853 #else
1854 return 0;
1855 #endif
1856 default:
1857 LOG_ERR("undefined aux data type %u", aux_tlv->type);
1858 return -EINVAL;
1859 }
1860
1861 /* check for a malformed struct: size larger than the remaining aux_data or not matching the size described in the tlv */
1862 size_left = aux_len - parsed_len - sizeof(struct ssp_intel_aux_tlv);
1863 if (size > size_left || size != aux_tlv->size) {
1864 LOG_ERR("malformed struct, size %d, size_left %d, tlv_size %d", size,
1865 size_left, aux_tlv->size);
1866 return -EINVAL;
1867 }
1868
1869 return 0;
1870 }
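
/*
 * Illustrative layout of the auxiliary data walked above: each entry is a
 * struct ssp_intel_aux_tlv header immediately followed by aux_tlv->size
 * bytes of payload, packed back to back:
 *
 *   | type | size | payload (size bytes) | type | size | payload | ...
 *
 * parsed_len is the offset of the current header from the start of the
 * aux data, so size_left is what remains after that header.
 */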
1871
1872 /**
1873 * This function checks if the provided buffer contains valid DMA control
1874 * TLV (Type-Length-Value) entries. It ensures that only specific types
1875 * of DMA control settings are allowed to be modified at runtime.
1876 */
dai_ssp_check_dma_control(const uint8_t * aux_ptr,int aux_len)1877 static int dai_ssp_check_dma_control(const uint8_t *aux_ptr, int aux_len)
1878 {
1879 int hop;
1880 struct ssp_intel_aux_tlv *aux_tlv;
1881
1882 for (int i = 0; i < aux_len; i += hop) {
1883 aux_tlv = (struct ssp_intel_aux_tlv *)(aux_ptr);
1884 switch (aux_tlv->type) {
1885 case SSP_DMA_CLK_CONTROLS:
1886 case SSP_DMA_TRANSMISSION_START:
1887 case SSP_DMA_TRANSMISSION_STOP:
1888 case SSP_DMA_ALWAYS_RUNNING_MODE:
1889 case SSP_DMA_SYNC_DATA:
1890 case SSP_DMA_CLK_CONTROLS_EXT:
1891 case SSP_LINK_CLK_SOURCE:
1892 break;
1893 default:
1894 LOG_ERR("incorect config type %u", aux_tlv->type);
1895 return -EINVAL;
1896 }
1897
1898 hop = aux_tlv->size + sizeof(struct ssp_intel_aux_tlv);
1899 aux_ptr += hop;
1900 }
1901
1902 return 0;
1903 }
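
/*
 * Hypothetical caller-side sketch (for illustration only) of a buffer this
 * check accepts: a single SSP_DMA_CLK_CONTROLS entry, header immediately
 * followed by its payload.
 *
 *   uint8_t buf[sizeof(struct ssp_intel_aux_tlv) + sizeof(struct ssp_intel_clk_ctl)];
 *   struct ssp_intel_aux_tlv *tlv = (struct ssp_intel_aux_tlv *)buf;
 *
 *   tlv->type = SSP_DMA_CLK_CONTROLS;
 *   tlv->size = sizeof(struct ssp_intel_clk_ctl);
 *   ...fill the struct ssp_intel_clk_ctl payload at tlv->val...
 *   if (dai_ssp_check_dma_control(buf, sizeof(buf)) == 0) {
 *           ...only runtime-changeable control types are present...
 *   }
 */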
1904
dai_ssp_parse_tlv(struct dai_intel_ssp * dp,const uint8_t * aux_ptr,size_t aux_len)1905 static int dai_ssp_parse_tlv(struct dai_intel_ssp *dp, const uint8_t *aux_ptr, size_t aux_len)
1906 {
1907 int hop, i, j;
1908 struct ssp_intel_aux_tlv *aux_tlv;
1909 struct ssp_intel_mn_ctl *mn;
1910 struct ssp_intel_clk_ctl *clk;
1911 struct ssp_intel_tr_ctl *tr;
1912 struct ssp_intel_run_ctl *run;
1913 struct ssp_intel_node_ctl *node;
1914 struct ssp_intel_sync_ctl *sync;
1915 struct ssp_intel_ext_ctl *ext;
1916 #ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
1917 struct ssp_intel_link_ctl *link;
1918 #endif
1919
1920 for (i = 0; i < aux_len; i += hop) {
1921 aux_tlv = (struct ssp_intel_aux_tlv *)(aux_ptr);
1922 if (dai_ssp_check_aux_data(aux_tlv, aux_len, i)) {
1923 return -EINVAL;
1924 }
1925 switch (aux_tlv->type) {
1926 case SSP_MN_DIVIDER_CONTROLS:
1927 mn = (struct ssp_intel_mn_ctl *)&aux_tlv->val;
1928 LOG_INF("mn div_m %u, div_n %u", mn->div_m, mn->div_n);
1929 break;
1930 case SSP_DMA_CLK_CONTROLS:
1931 clk = (struct ssp_intel_clk_ctl *)&aux_tlv->val;
1932 LOG_INF("clk start %u, stop %u", clk->start, clk->stop);
1933 break;
1934 case SSP_DMA_TRANSMISSION_START:
1935 case SSP_DMA_TRANSMISSION_STOP:
1936 tr = (struct ssp_intel_tr_ctl *)&aux_tlv->val;
1937 LOG_INF("tr sampling frequency %u, bit_depth %u, channel_map %u,",
1938 tr->sampling_frequency, tr->bit_depth, tr->channel_map);
1939 LOG_INF("channel_config %u, interleaving_style %u, format %u",
1940 tr->channel_config, tr->interleaving_style, tr->format);
1941 break;
1942 case SSP_DMA_ALWAYS_RUNNING_MODE:
1943 run = (struct ssp_intel_run_ctl *)&aux_tlv->val;
1944 LOG_INF("run enabled %u", run->enabled);
1945 break;
1946 case SSP_DMA_SYNC_DATA:
1947 sync = (struct ssp_intel_sync_ctl *)&aux_tlv->val;
1948 LOG_INF("sync sync_denominator %u, count %u",
1949 sync->sync_denominator, sync->count);
1950 node = (struct ssp_intel_node_ctl *)((uint8_t *)sync +
1951 sizeof(struct ssp_intel_sync_ctl));
1952 for (j = 0; j < sync->count; j++) {
1953 LOG_INF("node node_id %u, sampling_rate %u",
1954 node->node_id, node->sampling_rate);
1955 node++;
1956 }
1957 break;
1958 case SSP_DMA_CLK_CONTROLS_EXT:
1959 ext = (struct ssp_intel_ext_ctl *)&aux_tlv->val;
1960 LOG_INF("ext ext_data %u", ext->ext_data);
1961 break;
1962 case SSP_LINK_CLK_SOURCE:
1963 #ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
1964 link = (struct ssp_intel_link_ctl *)&aux_tlv->val;
1965
1966 #if CONFIG_SOC_INTEL_ACE15_MTPM
1967 sys_write32(sys_read32(dai_ip_base(dp) + I2SLCTL_OFFSET) |
1968 I2CLCTL_MLCS(link->clock_source), dai_ip_base(dp) +
1969 I2SLCTL_OFFSET);
1970 #elif CONFIG_SOC_INTEL_ACE20_LNL || CONFIG_SOC_INTEL_ACE30_PTL
1971 sys_write32(sys_read32(dai_i2svss_base(dp) + I2SLCTL_OFFSET) |
1972 I2CLCTL_MLCS(link->clock_source), dai_i2svss_base(dp) +
1973 I2SLCTL_OFFSET);
1974 #endif
1975 LOG_INF("link clock_source %u", link->clock_source);
1976 #endif
1977 break;
1978 default:
1979 LOG_ERR("undefined aux data type %u", aux_tlv->type);
1980 return -EINVAL;
1981 }
1982
1983 hop = aux_tlv->size + sizeof(struct ssp_intel_aux_tlv);
1984 aux_ptr += hop;
1985 }
1986
1987 return 0;
1988 }
1989
dai_ssp_parse_aux_data(struct dai_intel_ssp * dp,const void * spec_config)1990 static int dai_ssp_parse_aux_data(struct dai_intel_ssp *dp, const void *spec_config)
1991 {
1992 const struct dai_intel_ipc4_ssp_configuration_blob_ver_1_5 *blob = spec_config;
1993 int cfg_len, pre_aux_len, aux_len;
1994 uint8_t *aux_ptr;
1995
1996 cfg_len = blob->size;
1997 pre_aux_len = sizeof(*blob) + blob->i2s_mclk_control.mdivrcnt * sizeof(uint32_t);
1998 aux_len = cfg_len - pre_aux_len;
1999 aux_ptr = (uint8_t *)blob + pre_aux_len;
2000
2001 if (aux_len <= 0) {
2002 return 0;
}
2003
2004 return dai_ssp_parse_tlv(dp, aux_ptr, aux_len);
2005 }
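
/*
 * Sketch of the ver 1.5 blob layout assumed above: a fixed-size header,
 * followed by i2s_mclk_control.mdivrcnt divider words, with any remaining
 * bytes (aux_len) being the TLV-encoded auxiliary data handed to
 * dai_ssp_parse_tlv().
 *
 *   | blob header (sizeof(*blob)) | mdivr[0..mdivrcnt-1] | aux TLVs... |
 */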
2006
dai_ssp_set_clock_control_ver_1_5(struct dai_intel_ssp * dp,const struct dai_intel_ipc4_ssp_mclk_config_2 * cc)2007 static int dai_ssp_set_clock_control_ver_1_5(struct dai_intel_ssp *dp,
2008 const struct dai_intel_ipc4_ssp_mclk_config_2 *cc)
2009 {
2010 /* currently we only support 1 divider */
2011 if (cc->mdivrcnt != 1) {
2012 LOG_ERR("bad clock divider count %u", cc->mdivrcnt);
2013 return -EINVAL;
2014 }
2015
2016 /* ssp blob is set by pcm_hw_params for ipc4 stream, so enable
2017 * mclk and bclk at this time.
2018 */
2019 dai_ssp_mn_set_mclk_blob(dp, cc->mdivctlr, cc->mdivr[0]);
2020
2021 return 0;
2022 }
2023
dai_ssp_set_clock_control_ver_1(struct dai_intel_ssp * dp,const struct dai_intel_ipc4_ssp_mclk_config * cc)2024 static int dai_ssp_set_clock_control_ver_1(struct dai_intel_ssp *dp,
2025 const struct dai_intel_ipc4_ssp_mclk_config *cc)
2026 {
2027 /* ssp blob is set by pcm_hw_params for ipc4 stream, so enable
2028 * mclk and bclk at this time.
2029 */
2030 dai_ssp_mn_set_mclk_blob(dp, cc->mdivc, cc->mdivr);
2031
2032 return 0;
2033 }
2034
dai_ssp_set_reg_config(struct dai_intel_ssp * dp,const struct dai_config * cfg,const struct dai_intel_ipc4_ssp_config * regs)2035 static void dai_ssp_set_reg_config(struct dai_intel_ssp *dp, const struct dai_config *cfg,
2036 const struct dai_intel_ipc4_ssp_config *regs)
2037 {
2038 struct dai_intel_ssp_plat_data *ssp_plat_data = dai_get_plat_data(dp);
2039 uint32_t sscr1 = 0;
2040 uint32_t sstsa = 0;
2041 uint32_t ssrsa = 0;
2042 uint32_t ssc0 = regs->ssc0;
2043
2044 #ifdef CONFIG_SOC_INTEL_ACE30_PTL
2045 sscr1 = regs->ssc1 & ~(SSCR1_RSVD21);
2046 #else
2047 sscr1 = regs->ssc1 & ~(SSCR1_RSRE | SSCR1_TSRE);
2048 sstsa = SSTSA_GET(regs->sstsa);
2049 ssrsa = SSRSA_GET(regs->ssrsa);
2050 #endif
2051
2052 LOG_INF("SSP%d configuration:", dp->dai_index);
2053 #ifndef CONFIG_SOC_INTEL_ACE30_PTL
2054 if (regs->sstsa & SSTSA_TXEN || regs->ssrsa & SSRSA_RXEN ||
2055 regs->ssc1 & (SSCR1_RSRE | SSCR1_TSRE)) {
2056 LOG_INF(" Ignoring %s%s%s%sfrom blob",
2057 regs->sstsa & SSTSA_TXEN ? "SSTSA:TXEN " : "",
2058 regs->ssrsa & SSRSA_RXEN ? "SSRSA:RXEN " : "",
2059 regs->ssc1 & SSCR1_TSRE ? "SSCR1:TSRE " : "",
2060 regs->ssc1 & SSCR1_RSRE ? "SSCR1:RSRE " : "");
2061 }
2062 #endif
2063
2064 sys_write32(ssc0, dai_base(dp) + SSCR0);
2065 sys_write32(regs->ssc2 & ~SSCR2_SFRMEN, dai_base(dp) + SSCR2); /* hardware specific flow */
2066 sys_write32(sscr1, dai_base(dp) + SSCR1);
2067 sys_write32(regs->ssc2 | SSCR2_SFRMEN, dai_base(dp) + SSCR2); /* hardware specific flow */
2068 sys_write32(regs->ssc2, dai_base(dp) + SSCR2);
2069 #ifndef CONFIG_SOC_INTEL_ACE30_PTL
2070 sys_write32(regs->ssc3, dai_base(dp) + SSCR3);
2071 #endif
2072 sys_write32(regs->sspsp, dai_base(dp) + SSPSP);
2073 sys_write32(regs->sspsp2, dai_base(dp) + SSPSP2);
2074 sys_write32(regs->ssioc, dai_base(dp) + SSIOC);
2075 sys_write32(regs->sscto, dai_base(dp) + SSTO);
2076 #ifdef CONFIG_SOC_INTEL_ACE30_PTL
2077 for (uint32_t idx = 0; idx < I2SIPCMC; ++idx) {
2078 sys_write64(regs->ssmidytsa[idx], dai_base(dp) + SSMIDyTSA(idx));
2079 }
2080
2081 for (uint32_t idx = 0; idx < I2SOPCMC; ++idx) {
2082 sys_write64(regs->ssmodytsa[idx], dai_base(dp) + SSMODyTSA(idx));
2083 }
2084 #else
2085 sys_write32(sstsa, dai_base(dp) + SSTSA);
2086 sys_write32(ssrsa, dai_base(dp) + SSRSA);
2087 #endif
2088
2089 LOG_INF(" sscr0 = 0x%08x, sscr1 = 0x%08x, ssto = 0x%08x, sspsp = 0x%08x",
2090 ssc0, sscr1, regs->sscto, regs->sspsp);
2091 LOG_INF(" sscr2 = 0x%08x, sspsp2 = 0x%08x, sscr3 = 0x%08x",
2092 regs->ssc2, regs->sspsp2, regs->ssc3);
2093 LOG_INF(" ssioc = 0x%08x, ssrsa = 0x%08x, sstsa = 0x%08x",
2094 regs->ssioc, ssrsa, sstsa);
2095
2096 ssp_plat_data->params.sample_valid_bits = SSCR0_DSIZE_GET(ssc0);
2097 if (ssc0 & SSCR0_EDSS) {
2098 ssp_plat_data->params.sample_valid_bits += 16;
2099 }
2100 #ifdef CONFIG_SOC_INTEL_ACE30_PTL
2101 ssp_plat_data->params.tx_slots = regs->ssmodytsa[dp->tdm_slot_group];
2102 ssp_plat_data->params.rx_slots = regs->ssmidytsa[dp->tdm_slot_group];
2103 #else
2104 ssp_plat_data->params.tdm_slots = SSCR0_FRDC_GET(ssc0);
2105 ssp_plat_data->params.tx_slots = SSTSA_GET(sstsa);
2106 ssp_plat_data->params.rx_slots = SSRSA_GET(ssrsa);
2107 #endif
2108 ssp_plat_data->params.fsync_rate = cfg->rate;
2109 dp->state[DAI_DIR_PLAYBACK] = DAI_STATE_PRE_RUNNING;
2110 dp->state[DAI_DIR_CAPTURE] = DAI_STATE_PRE_RUNNING;
2111 }
2112
dai_ssp_set_config_blob(struct dai_intel_ssp * dp,const struct dai_config * cfg,const void * spec_config)2113 static int dai_ssp_set_config_blob(struct dai_intel_ssp *dp, const struct dai_config *cfg,
2114 const void *spec_config)
2115 {
2116 const struct dai_intel_ipc4_ssp_configuration_blob_ver_1_5 *blob15 = spec_config;
2117 const struct dai_intel_ipc4_ssp_configuration_blob *blob = spec_config;
2118 struct dai_intel_ssp_plat_data *ssp_plat_data = dai_get_plat_data(dp);
2119 int err;
2120
2121 #ifdef CONFIG_SOC_INTEL_ACE30_PTL
2122 dp->tdm_slot_group = cfg->tdm_slot_group;
2123 #endif
2124
2125 /* set config only once for playback or capture */
2126 if (ssp_plat_data->is_initialized) {
2127 dp->state[DAI_DIR_PLAYBACK] = DAI_STATE_PRE_RUNNING;
2128 dp->state[DAI_DIR_CAPTURE] = DAI_STATE_PRE_RUNNING;
2129 return 0;
2130 }
2131
2132 if (blob15->version == SSP_BLOB_VER_1_5) {
2133 err = dai_ssp_parse_aux_data(dp, spec_config);
2134 if (err) {
2135 return err;
}
2136 dai_ssp_set_reg_config(dp, cfg, &blob15->i2s_ssp_config);
2137 err = dai_ssp_set_clock_control_ver_1_5(dp, &blob15->i2s_mclk_control);
2138 if (err) {
2139 return err;
}
2140 } else {
2141 dai_ssp_set_reg_config(dp, cfg, &blob->i2s_driver_config.i2s_config);
2142 err = dai_ssp_set_clock_control_ver_1(dp, &blob->i2s_driver_config.mclk_config);
2143 if (err) {
2144 return err;
}
2145 }
2146
2147 ssp_plat_data->clk_active |= SSP_CLK_MCLK_ES_REQ;
2148
2149 /* enable port */
2150 dai_ssp_update_bits(dp, SSCR0, SSCR0_SSE, SSCR0_SSE);
2151 ssp_plat_data->clk_active |= SSP_CLK_BCLK_ES_REQ;
2152 ssp_plat_data->is_initialized = true;
2153
2154 return 0;
2155 }
2156
2157 /*
2158 * A portion of the SSP configuration should be applied just before the
2159 * SSP DAI is activated, either for power saving or to keep parameters
2160 * runtime configurable.
2161 */
dai_ssp_pre_start(struct dai_intel_ssp * dp)2162 static int dai_ssp_pre_start(struct dai_intel_ssp *dp)
2163 {
2164 struct dai_intel_ssp_plat_data *ssp_plat_data = dai_get_plat_data(dp);
2165 int ret = 0;
2166
2167 /*
2168 * Whether mclk/bclk is actually configured is checked inside the
2169 * ssp_mclk/bclk_prepare_enable/disable functions.
2170 */
2171 if (!(ssp_plat_data->clk_active & SSP_CLK_MCLK_ES_REQ)) {
2172 /* MCLK config */
2173 ret = dai_ssp_mclk_prepare_enable(dp);
2174 if (ret < 0) {
2175 return ret;
2176 }
2177 }
2178
2179 if (!(ssp_plat_data->clk_active & SSP_CLK_BCLK_ES_REQ)) {
2180 ret = dai_ssp_bclk_prepare_enable(dp);
2181 }
2182
2183 return ret;
2184 }
2185
2186 /*
2187 * For power saving, release clocks and power when the SSP DAI becomes
2188 * inactive; the runtime parameter configuration does not have to be
2189 * reset.
2190 */
dai_ssp_post_stop(struct dai_intel_ssp * dp)2191 static void dai_ssp_post_stop(struct dai_intel_ssp *dp)
2192 {
2193 struct dai_intel_ssp_plat_data *ssp_plat_data = dai_get_plat_data(dp);
2194
2195 /* release clocks if SSP is inactive */
2196 if (dp->state[DAI_DIR_PLAYBACK] != DAI_STATE_RUNNING &&
2197 dp->state[DAI_DIR_CAPTURE] != DAI_STATE_RUNNING) {
2198 if (!(ssp_plat_data->clk_active & SSP_CLK_BCLK_ES_REQ)) {
2199 LOG_INF("releasing BCLK clocks for SSP%d...", dp->dai_index);
2200 dai_ssp_bclk_disable_unprepare(dp);
2201 }
2202 if (!(ssp_plat_data->clk_active & SSP_CLK_MCLK_ES_REQ)) {
2203 LOG_INF("releasing MCLK clocks for SSP%d...", dp->dai_index);
2204 dai_ssp_mclk_disable_unprepare(dp);
2205 }
2206 }
2207 }
2208
dai_ssp_early_start(struct dai_intel_ssp * dp,int direction)2209 static void dai_ssp_early_start(struct dai_intel_ssp *dp, int direction)
2210 {
2211 struct dai_intel_ssp_plat_data *ssp_plat_data = dai_get_plat_data(dp);
2212 k_spinlock_key_t key;
2213
2214 key = k_spin_lock(&dp->lock);
2215
2216 /* RX fifo must be cleared before start */
2217 if (direction == DAI_DIR_CAPTURE) {
2218 LOG_INF("SSP%d RX", dp->dai_index);
2219 ssp_empty_rx_fifo_on_start(dp);
2220 } else {
2221 LOG_INF("SSP%d TX", dp->dai_index);
2222 }
2223
2224 /* request mclk/bclk */
2225 dai_ssp_pre_start(dp);
2226
2227 if (!(ssp_plat_data->clk_active & SSP_CLK_BCLK_ES_REQ)) {
2228 /* enable port */
2229 LOG_INF("SSP%d: set SSE", dp->dai_index);
2230 dai_ssp_update_bits(dp, SSCR0, SSCR0_SSE, SSCR0_SSE);
2231 }
2232
2233 k_spin_unlock(&dp->lock, key);
2234 }
2235
2236 /* start the SSP for either playback or capture */
dai_ssp_start(struct dai_intel_ssp * dp,int direction)2237 static void dai_ssp_start(struct dai_intel_ssp *dp, int direction)
2238 {
2239 struct dai_intel_ssp_pdata *ssp = dai_get_drvdata(dp);
2240 k_spinlock_key_t key;
2241
2242 key = k_spin_lock(&dp->lock);
2243
2244
2245 /* enable DMA */
2246 #if CONFIG_SOC_INTEL_ACE30_PTL
2247 if (direction == DAI_DIR_PLAYBACK) {
2248 dai_ssp_update_bits(dp, SSMODyCS(dp->tdm_slot_group),
2249 SSMODyCS_TSRE, SSMODyCS_TSRE);
2250 dai_ssp_update_bits(dp, SSMODyCS(dp->tdm_slot_group),
2251 SSMODyCS_TXEN, SSMODyCS_TXEN);
2252 } else {
2253 dai_ssp_update_bits(dp, SSMIDyCS(dp->tdm_slot_group),
2254 SSMIDyCS_RSRE, SSMIDyCS_RSRE);
2255 dai_ssp_update_bits(dp, SSMIDyCS(dp->tdm_slot_group),
2256 SSMIDyCS_RXEN, SSMIDyCS_RXEN);
2257 }
2258 #else
2259 if (direction == DAI_DIR_PLAYBACK) {
2260 LOG_INF("SSP%d TX", dp->dai_index);
2261 dai_ssp_update_bits(dp, SSCR1, SSCR1_TSRE, SSCR1_TSRE);
2262 dai_ssp_update_bits(dp, SSTSA, SSTSA_TXEN, SSTSA_TXEN);
2263 } else {
2264 LOG_INF("SSP%d RX", dp->dai_index);
2265 dai_ssp_update_bits(dp, SSCR1, SSCR1_RSRE, SSCR1_RSRE);
2266 dai_ssp_update_bits(dp, SSRSA, SSRSA_RXEN, SSRSA_RXEN);
2267 }
2268 #endif
2269
2270 dp->state[direction] = DAI_STATE_RUNNING;
2271
2272 /*
2273 * Wait to get a valid FIFO status in clock consumer mode. TODO: it is
2274 * not yet confirmed which SSP clock consumer modes need the delay;
2275 * add them here once known.
2276 */
2277 switch (ssp->config.format & DAI_INTEL_IPC3_SSP_FMT_CLOCK_PROVIDER_MASK) {
2278 case DAI_INTEL_IPC3_SSP_FMT_CBC_CFC:
2279 break;
2280 default:
2281 /* delay for all SSP consumer-clock modes for now - see above */
2282 /* ssp_wait_delay(PLATFORM_SSP_DELAY); */
2283 k_busy_wait(DAI_INTEL_SSP_PLATFORM_DELAY_US);
2284 break;
2285 }
2286
2287 k_spin_unlock(&dp->lock, key);
2288 }
2289
2290 /* stop the SSP for either playback or capture */
dai_ssp_stop(struct dai_intel_ssp * dp,int direction)2291 static void dai_ssp_stop(struct dai_intel_ssp *dp, int direction)
2292 {
2293 struct dai_intel_ssp_pdata *ssp = dai_get_drvdata(dp);
2294 k_spinlock_key_t key;
2295
2296 key = k_spin_lock(&dp->lock);
2297
2298 /*
2299 * Wait to get a valid FIFO status in clock consumer mode. TODO: it is
2300 * not yet confirmed which SSP clock consumer modes need the delay;
2301 * add them here once known.
2302 */
2303 switch (ssp->config.format & DAI_INTEL_IPC3_SSP_FMT_CLOCK_PROVIDER_MASK) {
2304 case DAI_INTEL_IPC3_SSP_FMT_CBC_CFC:
2305 break;
2306 default:
2307 /* delay for all SSP consumer-clock modes for now - see above */
2308 k_busy_wait(DAI_INTEL_SSP_PLATFORM_DELAY_US);
2309 break;
2310 }
2311
2312 /* stop Rx if needed */
2313 if (direction == DAI_DIR_CAPTURE &&
2314 dp->state[DAI_DIR_CAPTURE] != DAI_STATE_PRE_RUNNING) {
2315 LOG_INF("SSP%d RX", dp->dai_index);
2316 #if CONFIG_SOC_INTEL_ACE30_PTL
2317 dai_ssp_update_bits(dp, SSMIDyCS(dp->tdm_slot_group), SSMIDyCS_RXEN, 0);
2318 dai_ssp_update_bits(dp, SSMIDyCS(dp->tdm_slot_group), SSMIDyCS_RSRE, 0);
2319 #else
2320 dai_ssp_update_bits(dp, SSRSA, SSRSA_RXEN, 0);
2321 dai_ssp_update_bits(dp, SSCR1, SSCR1_RSRE, 0);
2322 #endif
2323 ssp_empty_rx_fifo_on_stop(dp);
2324 dp->state[DAI_DIR_CAPTURE] = DAI_STATE_PRE_RUNNING;
2325 }
2326
2327 /* stop Tx if needed */
2328 if (direction == DAI_DIR_PLAYBACK &&
2329 dp->state[DAI_DIR_PLAYBACK] != DAI_STATE_PRE_RUNNING) {
2330 LOG_INF("SSP%d TX", dp->dai_index);
2331 #if CONFIG_SOC_INTEL_ACE30_PTL
2332 dai_ssp_update_bits(dp, SSMODyCS(dp->tdm_slot_group), SSMODyCS_TSRE, 0);
2333 dai_ssp_empty_tx_fifo(dp);
2334 dai_ssp_update_bits(dp, SSMODyCS(dp->tdm_slot_group), SSMODyCS_TXEN, 0);
2335 #else
2336 dai_ssp_update_bits(dp, SSCR1, SSCR1_TSRE, 0);
2337 dai_ssp_empty_tx_fifo(dp);
2338 dai_ssp_update_bits(dp, SSTSA, SSTSA_TXEN, 0);
2339 #endif
2340 dp->state[DAI_DIR_PLAYBACK] = DAI_STATE_PRE_RUNNING;
2341 }
2342
2343 k_spin_unlock(&dp->lock, key);
2344 }
2345
dai_ssp_pause(struct dai_intel_ssp * dp,int direction)2346 static void dai_ssp_pause(struct dai_intel_ssp *dp, int direction)
2347 {
2348 if (direction == DAI_DIR_CAPTURE) {
2349 LOG_INF("SSP%d RX", dp->dai_index);
2350 } else {
2351 LOG_INF("SSP%d TX", dp->dai_index);
2352 }
2353
2354 dp->state[direction] = DAI_STATE_PAUSED;
2355 }
2356
dai_ssp_trigger(const struct device * dev,enum dai_dir dir,enum dai_trigger_cmd cmd)2357 static int dai_ssp_trigger(const struct device *dev, enum dai_dir dir,
2358 enum dai_trigger_cmd cmd)
2359 {
2360 struct dai_intel_ssp *dp = (struct dai_intel_ssp *)dev->data;
2361 int array_index = SSP_ARRAY_INDEX(dir);
2362
2363 LOG_DBG("SSP%d: cmd %d", dp->dai_index, cmd);
2364
2365 switch (cmd) {
2366 case DAI_TRIGGER_START:
2367 if (dp->state[array_index] == DAI_STATE_PAUSED ||
2368 dp->state[array_index] == DAI_STATE_PRE_RUNNING) {
2369 dai_ssp_start(dp, array_index);
2370 }
2371 break;
2372 case DAI_TRIGGER_STOP:
2373 dai_ssp_stop(dp, array_index);
2374 break;
2375 case DAI_TRIGGER_PAUSE:
2376 dai_ssp_pause(dp, array_index);
2377 break;
2378 case DAI_TRIGGER_PRE_START:
2379 dai_ssp_early_start(dp, array_index);
2380 break;
2381 default:
2382 break;
2383 }
2384
2385 return 0;
2386 }
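
/*
 * Typical trigger sequence from a client of the Zephyr DAI API
 * (illustrative only; dev is a handle to one of the instances defined at
 * the end of this file):
 *
 *   dai_trigger(dev, DAI_DIR_TX, DAI_TRIGGER_PRE_START);  // clocks + SSE
 *   dai_trigger(dev, DAI_DIR_TX, DAI_TRIGGER_START);      // enable DMA requests
 *   ...
 *   dai_trigger(dev, DAI_DIR_TX, DAI_TRIGGER_STOP);       // drain FIFO and disable
 */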
2387
dai_ssp_config_get(const struct device * dev,struct dai_config * cfg,enum dai_dir dir)2388 static int dai_ssp_config_get(const struct device *dev, struct dai_config *cfg, enum dai_dir dir)
2389 {
2390 struct dai_config *params = (struct dai_config *)dev->config;
2391 struct dai_intel_ssp *dp = (struct dai_intel_ssp *)dev->data;
2392 struct dai_intel_ssp_pdata *ssp = dai_get_drvdata(dp);
2393 struct dai_intel_ssp_plat_data *ssp_plat_data = dai_get_plat_data(dp);
2394
2395 if (!cfg) {
2396 return -EINVAL;
2397 }
2398
2399 if (!ssp) {
2400 *cfg = *params;
2401 return 0;
2402 }
2403
2404 params->rate = ssp_plat_data->params.fsync_rate;
2405
2406 if (dir == DAI_DIR_PLAYBACK) {
2407 params->channels = POPCOUNT(ssp_plat_data->params.tx_slots);
2408 } else {
2409 params->channels = POPCOUNT(ssp_plat_data->params.rx_slots);
2410 }
2411
2412 params->word_size = ssp_plat_data->params.sample_valid_bits;
2413
2414 *cfg = *params;
2415
2416 return 0;
2417 }
2418
dai_ssp_config_set(const struct device * dev,const struct dai_config * cfg,const void * bespoke_cfg)2419 static int dai_ssp_config_set(const struct device *dev, const struct dai_config *cfg,
2420 const void *bespoke_cfg)
2421 {
2422 struct dai_intel_ssp *dp = (struct dai_intel_ssp *)dev->data;
2423 struct dai_intel_ssp_plat_data *ssp_plat_data = dai_get_plat_data(dp);
2424 int ret;
2425
2426 if (cfg->type == DAI_INTEL_SSP) {
2427 ret = dai_ssp_set_config_tplg(dp, cfg, bespoke_cfg);
2428 } else {
2429 ret = dai_ssp_set_config_blob(dp, cfg, bespoke_cfg);
2430 }
2431 dai_ssp_program_channel_map(dp, cfg, ssp_plat_data->ssp_index, bespoke_cfg);
2432
2433 return ret;
2434 }
2435
dai_ssp_get_properties(const struct device * dev,enum dai_dir dir,int stream_id)2436 static const struct dai_properties *dai_ssp_get_properties(const struct device *dev,
2437 enum dai_dir dir, int stream_id)
2438 {
2439 struct dai_intel_ssp *dp = (struct dai_intel_ssp *)dev->data;
2440 struct dai_intel_ssp_pdata *ssp = dai_get_drvdata(dp);
2441 struct dai_intel_ssp_plat_data *ssp_plat_data = dai_get_plat_data(dp);
2442 struct dai_properties *prop = &ssp->props;
2443 int array_index = SSP_ARRAY_INDEX(dir);
2444
2445 prop->fifo_address = ssp_plat_data->fifo[array_index].offset;
2446 prop->dma_hs_id = ssp_plat_data->fifo[array_index].handshake;
2447
2448 if (ssp_plat_data->clk_active & SSP_CLK_BCLK_ACTIVE) {
2449 prop->reg_init_delay = 0;
2450 } else {
2451 prop->reg_init_delay = ssp_plat_data->params.bclk_delay;
2452 }
2453
2454 LOG_INF("SSP%u: fifo %u, handshake %u, init delay %u", dp->dai_index, prop->fifo_address,
2455 prop->dma_hs_id, prop->reg_init_delay);
2456
2457 return prop;
2458 }
2459
ssp_acquire_ip(struct dai_intel_ssp * dp)2460 static void ssp_acquire_ip(struct dai_intel_ssp *dp)
2461 {
2462 struct dai_intel_ssp_plat_data *ssp = dai_get_plat_data(dp);
2463
2464 ssp->acquire_count++;
2465
2466 if (ssp->acquire_count == 1) {
2467 /* Enable SSP power */
2468 dai_ssp_pm_runtime_en_ssp_power(dp, ssp->ssp_index);
2469
2470 /* Disable dynamic clock gating before touching any register */
2471 dai_ssp_pm_runtime_dis_ssp_clk_gating(dp, ssp->ssp_index);
2472 }
2473 }
2474
ssp_release_ip(struct dai_intel_ssp * dp)2475 static void ssp_release_ip(struct dai_intel_ssp *dp)
2476 {
2477 struct dai_intel_ssp_plat_data *ssp = dai_get_plat_data(dp);
2478
2479 if (ssp->acquire_count == 0) {
2480 return;
2481 }
2482
2483 --ssp->acquire_count;
2484
2485 if (ssp->acquire_count == 0) {
2486 /* disable SSP port if no users */
2487 if (dp->state[DAI_DIR_CAPTURE] == DAI_STATE_PRE_RUNNING &&
2488 dp->state[DAI_DIR_PLAYBACK] == DAI_STATE_PRE_RUNNING &&
2489 COND_CODE_1(CONFIG_INTEL_ADSP_CAVS,
2490 (!(ssp->clk_active & SSP_CLK_BCLK_ES_REQ)), (true))) {
2491 dai_ssp_update_bits(dp, SSCR0, SSCR0_SSE, 0);
2492 LOG_INF("%s SSE clear SSP%d", __func__, ssp->ssp_index);
2493 }
2494
2495 dai_ssp_post_stop(dp);
2496
2497 dai_ssp_pm_runtime_en_ssp_clk_gating(dp, ssp->ssp_index);
2498
2499 dai_ssp_mclk_disable_unprepare(dp);
2500 dai_ssp_bclk_disable_unprepare(dp);
2501
2502 /* Disable SSP power */
2503 dai_ssp_pm_runtime_dis_ssp_power(dp, ssp->ssp_index);
2504 ssp->is_initialized = false;
2505 }
2506 }
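
/*
 * ssp_acquire_ip()/ssp_release_ip() form a simple use count around the SSP
 * power state: the first acquire powers the port and disables dynamic
 * clock gating, and the last release re-enables gating, drops MCLK/BCLK
 * and powers the port off again.
 */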
2507
dai_ssp_probe(struct dai_intel_ssp * dp)2508 static int dai_ssp_probe(struct dai_intel_ssp *dp)
2509 {
2510 struct dai_intel_ssp_plat_data *ssp_plat_data = dai_get_plat_data(dp);
2511 struct dai_intel_ssp_pdata *ssp;
2512
2513 if (dai_get_drvdata(dp)) {
2514 return -EEXIST; /* already created */
2515 }
2516
2517 /* allocate private data */
2518 ssp = k_calloc(1, sizeof(*ssp));
2519 if (!ssp) {
2520 LOG_ERR("SSP%d: alloc failed", ssp_plat_data->ssp_index);
2521 return -ENOMEM;
2522 }
2523 dai_set_drvdata(dp, ssp);
2524
2525 dp->state[DAI_DIR_PLAYBACK] = DAI_STATE_READY;
2526 dp->state[DAI_DIR_CAPTURE] = DAI_STATE_READY;
2527
2528 #if CONFIG_INTEL_MN
2529 /* Reset M/N, power-gating functions need it */
2530 dai_ssp_mn_reset_bclk_divider(dp, ssp_plat_data->ssp_index);
2531 #endif
2532
2533 ssp_acquire_ip(dp);
2534
2535 return 0;
2536 }
2537
dai_ssp_remove(struct dai_intel_ssp * dp)2538 static int dai_ssp_remove(struct dai_intel_ssp *dp)
2539 {
2540 ssp_release_ip(dp);
2541
2542 k_free(dai_get_drvdata(dp));
2543 dai_set_drvdata(dp, NULL);
2544
2545 return 0;
2546 }
2547
ssp_pm_action(const struct device * dev,enum pm_device_action action)2548 static int ssp_pm_action(const struct device *dev, enum pm_device_action action)
2549 {
2550 struct dai_intel_ssp *dp = (struct dai_intel_ssp *)dev->data;
2551
2552 switch (action) {
2553 case PM_DEVICE_ACTION_SUSPEND:
2554 dai_ssp_remove(dp);
2555 break;
2556 case PM_DEVICE_ACTION_RESUME:
2557 dai_ssp_probe(dp);
2558 break;
2559 case PM_DEVICE_ACTION_TURN_OFF:
2560 case PM_DEVICE_ACTION_TURN_ON:
2561 /* All device pm is handled during resume and suspend */
2562 break;
2563 default:
2564 return -ENOTSUP;
2565 }
2566
2567 return 0;
2568 }
2569
dai_intel_ssp_init_device(const struct device * dev)2570 static int dai_intel_ssp_init_device(const struct device *dev)
2571 {
2572 struct dai_intel_ssp *dp = (struct dai_intel_ssp *)dev->data;
2573
2574 dp->ssp_plat_data = ssp_get_device_instance(dp->ssp_index);
2575
2576 return 0;
2577 }
2578
ssp_init(const struct device * dev)2579 static int ssp_init(const struct device *dev)
2580 {
2581 dai_intel_ssp_init_device(dev);
2582
2583 if (pm_device_on_power_domain(dev)) {
2584 pm_device_init_off(dev);
2585 } else {
2586 pm_device_init_suspended(dev);
2587 }
2588
2589 return pm_device_runtime_enable(dev);
2590 }
2591
dai_ssp_dma_control_set(const struct device * dev,const void * bespoke_cfg,size_t size)2592 static int dai_ssp_dma_control_set(const struct device *dev,
2593 const void *bespoke_cfg,
2594 size_t size)
2595 {
2596 struct dai_intel_ssp *dp = (struct dai_intel_ssp *)dev->data;
2597
2598 LOG_INF("SSP%d: tlv addr = 0x%x, tlv size = %d",
2599 dp->dai_index, (uint32_t)bespoke_cfg, size);
2600 if (size < sizeof(struct ssp_intel_aux_tlv)) {
2601 return -EINVAL;
2602 }
2603
2604 if (dp->state[DAI_DIR_PLAYBACK] != DAI_STATE_READY ||
2605 dp->state[DAI_DIR_CAPTURE] != DAI_STATE_READY) {
2606 return -EIO;
2607 }
2608
2609 if (dai_ssp_check_dma_control(bespoke_cfg, size)) {
2610 return -EINVAL;
2611 }
2612
2613 return dai_ssp_parse_tlv(dp, bespoke_cfg, size);
2614 }
2615
2616 static struct dai_driver_api dai_intel_ssp_api_funcs = {
2617 .probe = pm_device_runtime_get,
2618 .remove = pm_device_runtime_put,
2619 .config_set = dai_ssp_config_set,
2620 .config_get = dai_ssp_config_get,
2621 .trigger = dai_ssp_trigger,
2622 .get_properties = dai_ssp_get_properties,
2623 .config_update = dai_ssp_dma_control_set,
2624 };
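
/*
 * probe/remove map directly onto PM runtime get/put: the actual
 * dai_ssp_probe()/dai_ssp_remove() work runs from ssp_pm_action() when
 * the device is resumed or suspended.
 */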
2625
2626
2627 #define DT_DRV_COMPAT intel_ssp_dai
2628
2629 #define DAI_INTEL_SSP_DEVICE_INIT(n) \
2630 static struct dai_config dai_intel_ssp_config_##n = { \
2631 .type = DAI_INTEL_SSP, \
2632 .dai_index = DT_INST_REG_ADDR(n), \
2633 }; \
2634 static struct dai_intel_ssp dai_intel_ssp_data_##n = { \
2635 .dai_index = DT_INST_REG_ADDR(n), \
2636 .ssp_index = DT_PROP(DT_INST_PARENT(n), ssp_index), \
2637 .tdm_slot_group = 0, \
2638 }; \
2639 \
2640 PM_DEVICE_DT_INST_DEFINE(n, ssp_pm_action); \
2641 \
2642 DEVICE_DT_INST_DEFINE(n, \
2643 ssp_init, PM_DEVICE_DT_INST_GET(n), \
2644 &dai_intel_ssp_data_##n, \
2645 &dai_intel_ssp_config_##n, \
2646 POST_KERNEL, 42, \
2647 &dai_intel_ssp_api_funcs);
2648
2649 DT_INST_FOREACH_STATUS_OKAY(DAI_INTEL_SSP_DEVICE_INIT)
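
/*
 * For a single enabled intel,ssp-dai node with unit address 0 whose parent
 * controller has ssp-index 0, the macro above expands roughly to:
 *
 *   static struct dai_config dai_intel_ssp_config_0 = {
 *           .type = DAI_INTEL_SSP,
 *           .dai_index = 0,
 *   };
 *   static struct dai_intel_ssp dai_intel_ssp_data_0 = {
 *           .dai_index = 0, .ssp_index = 0, .tdm_slot_group = 0,
 *   };
 *   PM_DEVICE_DT_INST_DEFINE(0, ssp_pm_action);
 *   DEVICE_DT_INST_DEFINE(0, ssp_init, PM_DEVICE_DT_INST_GET(0), ...);
 */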
2650