1 /*
2  * Copyright (c) 2021 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <stdlib.h>
8 #include <zephyr/drivers/i2s.h>
9 #include <zephyr/drivers/clock_control/nrf_clock_control.h>
10 #include <zephyr/drivers/pinctrl.h>
11 #include <soc.h>
12 #include <nrfx_i2s.h>
13 
14 #include <zephyr/logging/log.h>
15 #include <zephyr/irq.h>
16 LOG_MODULE_REGISTER(i2s_nrfx, CONFIG_I2S_LOG_LEVEL);
17 
/* Per-direction stream configuration: the Zephyr-level settings together
 * with the nrfx driver configuration derived from them.
 */
struct stream_cfg {
	struct i2s_config cfg;      /* configuration passed via the I2S API */
	nrfx_i2s_config_t nrfx_cfg; /* corresponding nrfx driver configuration */
};

/* Entry stored in the TX/RX message queues: one memory slab block. */
struct i2s_buf {
	void *mem_block; /* block allocated from the stream's memory slab */
	size_t size;     /* number of bytes of data in the block */
};
27 
/* Per-instance driver runtime data. */
struct i2s_nrfx_drv_data {
	struct onoff_manager *clk_mgr;  /* manager of the HF clock to request */
	struct onoff_client clk_cli;    /* client used for clock requests */
	struct stream_cfg tx;           /* TX stream configuration */
	struct k_msgq tx_queue;         /* blocks queued for transmission */
	struct stream_cfg rx;           /* RX stream configuration */
	struct k_msgq rx_queue;         /* received blocks awaiting i2s_read() */
	const nrfx_i2s_t *p_i2s;        /* nrfx driver instance */
	/* TX buffer most recently supplied to the peripheral; used to detect
	 * the final TX block and to free it after the transfer stops.
	 */
	const uint32_t *last_tx_buffer;
	enum i2s_state state;           /* current state (common for TX and RX) */
	enum i2s_dir active_dir;        /* direction(s) of the active transfer */
	bool stop;       /* stop after the current (TX or RX) block */
	bool discard_rx; /* discard further RX blocks */
	/* Set by data_handler() when no TX buffer was available; checked by
	 * i2s_nrfx_write() to supply the deferred buffer. Volatile because it
	 * is shared between thread and interrupt context.
	 */
	volatile bool next_tx_buffer_needed;
	bool tx_configured : 1; /* TX direction has a valid configuration */
	bool rx_configured : 1; /* RX direction has a valid configuration */
	bool request_clock : 1; /* HF clock must be requested before transfers */
};
46 
/* Per-instance constant configuration, set up at build time from devicetree. */
struct i2s_nrfx_drv_cfg {
	nrfx_i2s_data_handler_t data_handler;  /* instance-specific nrfx handler */
	nrfx_i2s_t i2s;                        /* nrfx driver instance */
	nrfx_i2s_config_t nrfx_def_cfg;        /* default nrfx configuration */
	const struct pinctrl_dev_config *pcfg; /* pin control configuration */
	/* MCK source, from the "clock-source" devicetree property. */
	enum clock_source {
		PCLK32M,      /* 32 MHz peripheral clock (HFINT sufficient) */
		PCLK32M_HFXO, /* 32 MHz peripheral clock, HFXO requested */
		ACLK          /* audio clock (HFCLKAUDIO), where available */
	} clk_src;
};
58 
59 /* Finds the clock settings that give the frame clock frequency closest to
60  * the one requested, taking into account the hardware limitations.
61  */
find_suitable_clock(const struct i2s_nrfx_drv_cfg * drv_cfg,nrfx_i2s_config_t * config,const struct i2s_config * i2s_cfg)62 static void find_suitable_clock(const struct i2s_nrfx_drv_cfg *drv_cfg,
63 				nrfx_i2s_config_t *config,
64 				const struct i2s_config *i2s_cfg)
65 {
66 	static const struct {
67 		uint16_t        ratio_val;
68 		nrf_i2s_ratio_t ratio_enum;
69 	} ratios[] = {
70 		{  32, NRF_I2S_RATIO_32X },
71 		{  48, NRF_I2S_RATIO_48X },
72 		{  64, NRF_I2S_RATIO_64X },
73 		{  96, NRF_I2S_RATIO_96X },
74 		{ 128, NRF_I2S_RATIO_128X },
75 		{ 192, NRF_I2S_RATIO_192X },
76 		{ 256, NRF_I2S_RATIO_256X },
77 		{ 384, NRF_I2S_RATIO_384X },
78 		{ 512, NRF_I2S_RATIO_512X }
79 	};
80 	const uint32_t src_freq =
81 		(NRF_I2S_HAS_CLKCONFIG && drv_cfg->clk_src == ACLK)
82 		/* The I2S_NRFX_DEVICE() macro contains build assertions that
83 		 * make sure that the ACLK clock source is only used when it is
84 		 * available and only with the "hfclkaudio-frequency" property
85 		 * defined, but the default value of 0 here needs to be used to
86 		 * prevent compilation errors when the property is not defined
87 		 * (this expression will be eventually optimized away then).
88 		 */
89 		? DT_PROP_OR(DT_NODELABEL(clock), hfclkaudio_frequency, 0)
90 		: 32*1000*1000UL;
91 	uint32_t bits_per_frame = 2 * i2s_cfg->word_size;
92 	uint32_t best_diff = UINT32_MAX;
93 	uint8_t r, best_r = 0;
94 	nrf_i2s_mck_t best_mck_cfg = 0;
95 	uint32_t best_mck = 0;
96 
97 	for (r = 0; (best_diff != 0) && (r < ARRAY_SIZE(ratios)); ++r) {
98 		/* Only multiples of the frame width can be used as ratios. */
99 		if ((ratios[r].ratio_val % bits_per_frame) != 0) {
100 			continue;
101 		}
102 
103 		if (IS_ENABLED(CONFIG_SOC_SERIES_NRF53X) || IS_ENABLED(CONFIG_SOC_SERIES_NRF54LX)) {
104 			uint32_t requested_mck =
105 				i2s_cfg->frame_clk_freq * ratios[r].ratio_val;
106 			/* As specified in the nRF5340 PS:
107 			 *
108 			 * MCKFREQ = 4096 * floor(f_MCK * 1048576 /
109 			 *                        (f_source + f_MCK / 2))
110 			 * f_actual = f_source /
111 			 *            floor(1048576 * 4096 / MCKFREQ)
112 			 */
113 			enum { MCKCONST = 1048576 };
114 			uint32_t mck_factor =
115 				(uint32_t)(((uint64_t)requested_mck * MCKCONST) /
116 					   (src_freq + requested_mck / 2));
117 
118 			/* skip cases when mck_factor is too big for dividing */
119 			if (mck_factor > MCKCONST) {
120 				continue;
121 			}
122 			uint32_t actual_mck = src_freq / (MCKCONST / mck_factor);
123 
124 			uint32_t lrck_freq = actual_mck / ratios[r].ratio_val;
125 			uint32_t diff = lrck_freq >= i2s_cfg->frame_clk_freq
126 					? (lrck_freq - i2s_cfg->frame_clk_freq)
127 					: (i2s_cfg->frame_clk_freq - lrck_freq);
128 
129 			if (diff < best_diff) {
130 				best_mck_cfg = mck_factor * 4096;
131 				best_mck = actual_mck;
132 				best_r = r;
133 				best_diff = diff;
134 			}
135 		} else {
136 			static const struct {
137 				uint8_t       divider_val;
138 				nrf_i2s_mck_t divider_enum;
139 			} dividers[] = {
140 				{   8, NRF_I2S_MCK_32MDIV8 },
141 				{  10, NRF_I2S_MCK_32MDIV10 },
142 				{  11, NRF_I2S_MCK_32MDIV11 },
143 				{  15, NRF_I2S_MCK_32MDIV15 },
144 				{  16, NRF_I2S_MCK_32MDIV16 },
145 				{  21, NRF_I2S_MCK_32MDIV21 },
146 				{  23, NRF_I2S_MCK_32MDIV23 },
147 				{  30, NRF_I2S_MCK_32MDIV30 },
148 				{  31, NRF_I2S_MCK_32MDIV31 },
149 				{  32, NRF_I2S_MCK_32MDIV32 },
150 				{  42, NRF_I2S_MCK_32MDIV42 },
151 				{  63, NRF_I2S_MCK_32MDIV63 },
152 				{ 125, NRF_I2S_MCK_32MDIV125 }
153 			};
154 
155 			for (uint8_t d = 0; (best_diff != 0) && (d < ARRAY_SIZE(dividers)); ++d) {
156 				uint32_t mck_freq =
157 					src_freq / dividers[d].divider_val;
158 				uint32_t lrck_freq =
159 					mck_freq / ratios[r].ratio_val;
160 				uint32_t diff =
161 					lrck_freq >= i2s_cfg->frame_clk_freq
162 					? (lrck_freq - i2s_cfg->frame_clk_freq)
163 					: (i2s_cfg->frame_clk_freq - lrck_freq);
164 
165 				if (diff < best_diff) {
166 					best_mck_cfg = dividers[d].divider_enum;
167 					best_mck = mck_freq;
168 					best_r = r;
169 					best_diff = diff;
170 				}
171 
172 				/* Since dividers are in ascending order, stop
173 				 * checking next ones for the current ratio
174 				 * after resulting LRCK frequency falls below
175 				 * the one requested.
176 				 */
177 				if (lrck_freq < i2s_cfg->frame_clk_freq) {
178 					break;
179 				}
180 			}
181 		}
182 	}
183 
184 	config->mck_setup = best_mck_cfg;
185 	config->ratio = ratios[best_r].ratio_enum;
186 	LOG_INF("I2S MCK frequency: %u, actual PCM rate: %u",
187 		best_mck, best_mck / ratios[best_r].ratio_val);
188 }
189 
get_next_tx_buffer(struct i2s_nrfx_drv_data * drv_data,nrfx_i2s_buffers_t * buffers)190 static bool get_next_tx_buffer(struct i2s_nrfx_drv_data *drv_data,
191 			       nrfx_i2s_buffers_t *buffers)
192 {
193 	struct i2s_buf buf;
194 	int ret = k_msgq_get(&drv_data->tx_queue,
195 			     &buf,
196 			     K_NO_WAIT);
197 	if (ret == 0) {
198 		buffers->p_tx_buffer = buf.mem_block;
199 		buffers->buffer_size = buf.size / sizeof(uint32_t);
200 	}
201 	return (ret == 0);
202 }
203 
get_next_rx_buffer(struct i2s_nrfx_drv_data * drv_data,nrfx_i2s_buffers_t * buffers)204 static bool get_next_rx_buffer(struct i2s_nrfx_drv_data *drv_data,
205 			       nrfx_i2s_buffers_t *buffers)
206 {
207 	int ret = k_mem_slab_alloc(drv_data->rx.cfg.mem_slab,
208 				   (void **)&buffers->p_rx_buffer,
209 				   K_NO_WAIT);
210 	if (ret < 0) {
211 		LOG_ERR("Failed to allocate next RX buffer: %d",
212 			ret);
213 		return false;
214 	}
215 
216 	return true;
217 }
218 
free_tx_buffer(struct i2s_nrfx_drv_data * drv_data,const void * buffer)219 static void free_tx_buffer(struct i2s_nrfx_drv_data *drv_data,
220 			   const void *buffer)
221 {
222 	k_mem_slab_free(drv_data->tx.cfg.mem_slab, (void *)buffer);
223 	LOG_DBG("Freed TX %p", buffer);
224 }
225 
free_rx_buffer(struct i2s_nrfx_drv_data * drv_data,void * buffer)226 static void free_rx_buffer(struct i2s_nrfx_drv_data *drv_data, void *buffer)
227 {
228 	k_mem_slab_free(drv_data->rx.cfg.mem_slab, buffer);
229 	LOG_DBG("Freed RX %p", buffer);
230 }
231 
/* Supplies the buffers for the next part of the transfer to the nrfx driver.
 *
 * For transfers that include RX, allocates the next RX buffer first; if that
 * allocation fails, the transfer is stopped and the driver enters the ERROR
 * state. Returns true on success, false otherwise.
 */
static bool supply_next_buffers(struct i2s_nrfx_drv_data *drv_data,
				nrfx_i2s_buffers_t *next)
{
	if (drv_data->active_dir != I2S_DIR_TX) { /* -> RX active */
		if (!get_next_rx_buffer(drv_data, next)) {
			drv_data->state = I2S_STATE_ERROR;
			nrfx_i2s_stop(drv_data->p_i2s);
			return false;
		}
		/* Set buffer size if there is no TX buffer (which effectively
		 * controls how many bytes will be received).
		 */
		if (drv_data->active_dir == I2S_DIR_RX) {
			next->buffer_size =
				drv_data->rx.cfg.block_size / sizeof(uint32_t);
		}
	}

	/* Remember the TX buffer handed to the peripheral (NULL for RX-only
	 * transfers); data_handler() compares released buffers against it.
	 */
	drv_data->last_tx_buffer = next->p_tx_buffer;

	LOG_DBG("Next buffers: %p/%p", next->p_tx_buffer, next->p_rx_buffer);
	nrfx_i2s_next_buffers_set(drv_data->p_i2s, next);
	return true;
}
256 
/* Event handler called by the nrfx driver in interrupt context.
 *
 * @p released points to the buffers that the peripheral has finished using
 * (NULL when no buffers could be released yet), and @p status indicates
 * whether the transfer has stopped and/or buffers for its next part are
 * needed. This function recycles released buffers (queueing RX data for
 * i2s_read(), freeing consumed TX blocks), supplies the next buffers, and
 * drives the STOP/DRAIN/ERROR state transitions.
 */
static void data_handler(const struct device *dev,
			 const nrfx_i2s_buffers_t *released, uint32_t status)
{
	struct i2s_nrfx_drv_data *drv_data = dev->data;
	bool stop_transfer = false;

	if (status & NRFX_I2S_STATUS_TRANSFER_STOPPED) {
		if (drv_data->state == I2S_STATE_STOPPING) {
			drv_data->state = I2S_STATE_READY;
		}
		if (drv_data->last_tx_buffer) {
			/* Usually, these pointers are equal, i.e. the last TX
			 * buffer that were to be transferred is released by the
			 * driver after it stops. The last TX buffer pointer is
			 * then set to NULL here so that the buffer can be freed
			 * below, just as any other TX buffer released by the
			 * driver. However, it may happen that the buffer is not
			 * released this way, for example, when the transfer
			 * ends with an error because an RX buffer allocation
			 * fails. In such case, the last TX buffer needs to be
			 * freed here.
			 */
			if (drv_data->last_tx_buffer != released->p_tx_buffer) {
				free_tx_buffer(drv_data,
					       drv_data->last_tx_buffer);
			}
			drv_data->last_tx_buffer = NULL;
		}
		/* The transfer is over; deinitialize the peripheral and give
		 * back the clock if one was requested for this transfer.
		 */
		nrfx_i2s_uninit(drv_data->p_i2s);
		if (drv_data->request_clock) {
			(void)onoff_release(drv_data->clk_mgr);
		}
	}

	if (released == NULL) {
		/* This means that buffers for the next part of the transfer
		 * were not supplied and the previous ones cannot be released
		 * yet, as pointers to them were latched in the I2S registers.
		 * It is not an error when the transfer is to be stopped (those
		 * buffers will be released after the transfer actually stops).
		 */
		if (drv_data->state != I2S_STATE_STOPPING) {
			LOG_ERR("Next buffers not supplied on time");
			drv_data->state = I2S_STATE_ERROR;
		}
		nrfx_i2s_stop(drv_data->p_i2s);
		return;
	}

	if (released->p_rx_buffer) {
		if (drv_data->discard_rx) {
			free_rx_buffer(drv_data, released->p_rx_buffer);
		} else {
			struct i2s_buf buf = {
				.mem_block = released->p_rx_buffer,
				.size = released->buffer_size * sizeof(uint32_t)
			};
			int ret = k_msgq_put(&drv_data->rx_queue,
					     &buf,
					     K_NO_WAIT);
			if (ret < 0) {
				LOG_ERR("No room in RX queue");
				drv_data->state = I2S_STATE_ERROR;
				stop_transfer = true;

				free_rx_buffer(drv_data, released->p_rx_buffer);
			} else {
				LOG_DBG("Queued RX %p", released->p_rx_buffer);

				/* If the TX direction is not active and
				 * the transfer should be stopped after
				 * the current block, stop the reception.
				 */
				if (drv_data->active_dir == I2S_DIR_RX &&
				    drv_data->stop) {
					drv_data->discard_rx = true;
					stop_transfer = true;
				}
			}
		}
	}

	if (released->p_tx_buffer) {
		/* If the last buffer that was to be transferred has just been
		 * released, it is time to stop the transfer.
		 */
		if (released->p_tx_buffer == drv_data->last_tx_buffer) {
			drv_data->discard_rx = true;
			stop_transfer = true;
		} else {
			free_tx_buffer(drv_data, released->p_tx_buffer);
		}
	}

	if (stop_transfer) {
		nrfx_i2s_stop(drv_data->p_i2s);
	} else if (status & NRFX_I2S_STATUS_NEXT_BUFFERS_NEEDED) {
		nrfx_i2s_buffers_t next = { 0 };

		if (drv_data->active_dir != I2S_DIR_RX) { /* -> TX active */
			if (drv_data->stop) {
				/* If the stream is to be stopped, don't get
				 * the next TX buffer from the queue, instead
				 * supply the one used last time (it won't be
				 * transferred, the stream will stop right
				 * before this buffer would be started again).
				 */
				next.p_tx_buffer = drv_data->last_tx_buffer;
				next.buffer_size = 1;
			} else if (get_next_tx_buffer(drv_data, &next)) {
				/* Next TX buffer successfully retrieved from
				 * the queue, nothing more to do here.
				 */
			} else if (drv_data->state == I2S_STATE_STOPPING) {
				/* If there are no more TX blocks queued and
				 * the current state is STOPPING (so the DRAIN
				 * command was triggered) it is time to finish
				 * the transfer.
				 */
				drv_data->stop = true;
				/* Supply the same buffer as last time; it will
				 * not be transferred anyway, as the transfer
				 * will be stopped earlier.
				 */
				next.p_tx_buffer = drv_data->last_tx_buffer;
				next.buffer_size = 1;
			} else {
				/* Next TX buffer cannot be supplied now.
				 * Defer it to when the user writes more data.
				 */
				drv_data->next_tx_buffer_needed = true;
				return;
			}
		}

		(void)supply_next_buffers(drv_data, &next);
	}
}
395 
purge_queue(const struct device * dev,enum i2s_dir dir)396 static void purge_queue(const struct device *dev, enum i2s_dir dir)
397 {
398 	struct i2s_nrfx_drv_data *drv_data = dev->data;
399 	struct i2s_buf buf;
400 
401 	if (dir == I2S_DIR_TX || dir == I2S_DIR_BOTH) {
402 		while (k_msgq_get(&drv_data->tx_queue,
403 				  &buf,
404 				  K_NO_WAIT) == 0) {
405 			free_tx_buffer(drv_data, buf.mem_block);
406 		}
407 	}
408 
409 	if (dir == I2S_DIR_RX || dir == I2S_DIR_BOTH) {
410 		while (k_msgq_get(&drv_data->rx_queue,
411 				  &buf,
412 				  K_NO_WAIT) == 0) {
413 			free_rx_buffer(drv_data, buf.mem_block);
414 		}
415 	}
416 }
417 
/* Implements the i2s_configure() API call.
 *
 * Validates the requested configuration, translates it into nrfx driver
 * settings (sample width, format, alignment, channels, mode, and master
 * clock setup), and stores the result for the given direction(s). A frame
 * clock frequency of 0 resets the direction(s) to the unconfigured state.
 * Returns 0 on success, -EINVAL for unsupported settings or wrong state.
 */
static int i2s_nrfx_configure(const struct device *dev, enum i2s_dir dir,
			      const struct i2s_config *i2s_cfg)
{
	struct i2s_nrfx_drv_data *drv_data = dev->data;
	const struct i2s_nrfx_drv_cfg *drv_cfg = dev->config;
	nrfx_i2s_config_t nrfx_cfg;

	/* Reconfiguration is only allowed when no transfer is in progress. */
	if (drv_data->state != I2S_STATE_READY) {
		LOG_ERR("Cannot configure in state: %d", drv_data->state);
		return -EINVAL;
	}

	if (i2s_cfg->frame_clk_freq == 0) { /* -> reset state */
		purge_queue(dev, dir);
		if (dir == I2S_DIR_TX || dir == I2S_DIR_BOTH) {
			drv_data->tx_configured = false;
			memset(&drv_data->tx, 0, sizeof(drv_data->tx));
		}
		if (dir == I2S_DIR_RX || dir == I2S_DIR_BOTH) {
			drv_data->rx_configured = false;
			memset(&drv_data->rx, 0, sizeof(drv_data->rx));
		}
		return 0;
	}

	__ASSERT_NO_MSG(i2s_cfg->mem_slab != NULL &&
			i2s_cfg->block_size != 0);

	if ((i2s_cfg->block_size % sizeof(uint32_t)) != 0) {
		LOG_ERR("This device can transfer only full 32-bit words");
		return -EINVAL;
	}

	/* Start from the build-time defaults and override per request. */
	nrfx_cfg = drv_cfg->nrfx_def_cfg;

	switch (i2s_cfg->word_size) {
	case 8:
		nrfx_cfg.sample_width = NRF_I2S_SWIDTH_8BIT;
		break;
	case 16:
		nrfx_cfg.sample_width = NRF_I2S_SWIDTH_16BIT;
		break;
	case 24:
		nrfx_cfg.sample_width = NRF_I2S_SWIDTH_24BIT;
		break;
/* 32-bit samples are only available on some SoCs. */
#if defined(I2S_CONFIG_SWIDTH_SWIDTH_32Bit)
	case 32:
		nrfx_cfg.sample_width = NRF_I2S_SWIDTH_32BIT;
		break;
#endif
	default:
		LOG_ERR("Unsupported word size: %u", i2s_cfg->word_size);
		return -EINVAL;
	}

	switch (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) {
	case I2S_FMT_DATA_FORMAT_I2S:
		nrfx_cfg.alignment = NRF_I2S_ALIGN_LEFT;
		nrfx_cfg.format = NRF_I2S_FORMAT_I2S;
		break;
	case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED:
		nrfx_cfg.alignment = NRF_I2S_ALIGN_LEFT;
		nrfx_cfg.format = NRF_I2S_FORMAT_ALIGNED;
		break;
	case I2S_FMT_DATA_FORMAT_RIGHT_JUSTIFIED:
		nrfx_cfg.alignment = NRF_I2S_ALIGN_RIGHT;
		nrfx_cfg.format = NRF_I2S_FORMAT_ALIGNED;
		break;
	default:
		LOG_ERR("Unsupported data format: 0x%02x", i2s_cfg->format);
		return -EINVAL;
	}

	/* The peripheral offers no LSB-first ordering or clock inversion. */
	if ((i2s_cfg->format & I2S_FMT_DATA_ORDER_LSB) ||
	    (i2s_cfg->format & I2S_FMT_BIT_CLK_INV) ||
	    (i2s_cfg->format & I2S_FMT_FRAME_CLK_INV)) {
		LOG_ERR("Unsupported stream format: 0x%02x", i2s_cfg->format);
		return -EINVAL;
	}

	if (i2s_cfg->channels == 2) {
		nrfx_cfg.channels = NRF_I2S_CHANNELS_STEREO;
	} else if (i2s_cfg->channels == 1) {
		nrfx_cfg.channels = NRF_I2S_CHANNELS_LEFT;
	} else {
		LOG_ERR("Unsupported number of channels: %u",
			i2s_cfg->channels);
		return -EINVAL;
	}

	/* Bit clock and frame clock must be both slave or both master. */
	if ((i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) &&
	    (i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE)) {
		nrfx_cfg.mode = NRF_I2S_MODE_SLAVE;
	} else if (!(i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) &&
		   !(i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE)) {
		nrfx_cfg.mode = NRF_I2S_MODE_MASTER;
	} else {
		LOG_ERR("Unsupported operation mode: 0x%02x", i2s_cfg->options);
		return -EINVAL;
	}

	/* If the master clock generator is needed (i.e. in Master mode or when
	 * the MCK output is used), find a suitable clock configuration for it.
	 */
	if (nrfx_cfg.mode == NRF_I2S_MODE_MASTER ||
	    (nrf_i2s_mck_pin_get(drv_cfg->i2s.p_reg) & I2S_PSEL_MCK_CONNECT_Msk)
	    == I2S_PSEL_MCK_CONNECT_Connected << I2S_PSEL_MCK_CONNECT_Pos) {
		find_suitable_clock(drv_cfg, &nrfx_cfg, i2s_cfg);
		/* Unless the PCLK32M source is used with the HFINT oscillator
		 * (which is always available without any additional actions),
		 * it is required to request the proper clock to be running
		 * before starting the transfer itself.
		 */
		drv_data->request_clock = (drv_cfg->clk_src != PCLK32M);
	} else {
		nrfx_cfg.mck_setup = NRF_I2S_MCK_DISABLED;
		drv_data->request_clock = false;
	}

	if ((i2s_cfg->options & I2S_OPT_LOOPBACK) ||
	    (i2s_cfg->options & I2S_OPT_PINGPONG)) {
		LOG_ERR("Unsupported options: 0x%02x", i2s_cfg->options);
		return -EINVAL;
	}

	if (dir == I2S_DIR_TX || dir == I2S_DIR_BOTH) {
		drv_data->tx.cfg = *i2s_cfg;
		drv_data->tx.nrfx_cfg = nrfx_cfg;
		drv_data->tx_configured = true;
	}

	if (dir == I2S_DIR_RX || dir == I2S_DIR_BOTH) {
		drv_data->rx.cfg = *i2s_cfg;
		drv_data->rx.nrfx_cfg = nrfx_cfg;
		drv_data->rx_configured = true;
	}

	return 0;
}
557 
i2s_nrfx_config_get(const struct device * dev,enum i2s_dir dir)558 static const struct i2s_config *i2s_nrfx_config_get(const struct device *dev,
559 						    enum i2s_dir dir)
560 {
561 	struct i2s_nrfx_drv_data *drv_data = dev->data;
562 
563 	if (dir == I2S_DIR_TX && drv_data->tx_configured) {
564 		return &drv_data->tx.cfg;
565 	}
566 	if (dir == I2S_DIR_RX && drv_data->rx_configured) {
567 		return &drv_data->rx.cfg;
568 	}
569 
570 	return NULL;
571 }
572 
/* Implements the i2s_read() API call.
 *
 * Gets the next received block from the RX queue, waiting up to the
 * configured RX timeout (no wait in the ERROR state). On success, ownership
 * of the memory block passes to the caller. Returns 0 on success, -EIO when
 * RX is not configured or the queue is empty in the ERROR state, or the
 * k_msgq_get() error (e.g. -EAGAIN on timeout) otherwise.
 */
static int i2s_nrfx_read(const struct device *dev,
			 void **mem_block, size_t *size)
{
	struct i2s_nrfx_drv_data *drv_data = dev->data;
	struct i2s_buf buf;
	int ret;

	if (!drv_data->rx_configured) {
		LOG_ERR("Device is not configured");
		return -EIO;
	}

	ret = k_msgq_get(&drv_data->rx_queue,
			 &buf,
			 (drv_data->state == I2S_STATE_ERROR)
				? K_NO_WAIT
				: SYS_TIMEOUT_MS(drv_data->rx.cfg.timeout));
	if (ret == -ENOMSG) {
		return -EIO;
	}

	if (ret == 0) {
		/* Log only on success; on timeout (-EAGAIN) `buf` is not
		 * initialized and must not be read.
		 */
		LOG_DBG("Released RX %p", buf.mem_block);

		*mem_block = buf.mem_block;
		*size = buf.size;
	}

	return ret;
}
603 
/* Implements the i2s_write() API call.
 *
 * Queues @p mem_block (of @p size bytes, whole 32-bit words, at most the
 * configured block size) for transmission, waiting up to the configured TX
 * timeout for room in the queue. If the interrupt handler had previously
 * deferred a TX buffer request, supplies the just-queued buffer directly.
 * On success the driver takes ownership of the block. Returns 0 on success,
 * -EIO on state/size problems, or the k_msgq_put() error.
 */
static int i2s_nrfx_write(const struct device *dev,
			  void *mem_block, size_t size)
{
	struct i2s_nrfx_drv_data *drv_data = dev->data;
	struct i2s_buf buf = { .mem_block = mem_block, .size = size };
	int ret;

	if (!drv_data->tx_configured) {
		LOG_ERR("Device is not configured");
		return -EIO;
	}

	if (drv_data->state != I2S_STATE_RUNNING &&
	    drv_data->state != I2S_STATE_READY) {
		LOG_ERR("Cannot write in state: %d", drv_data->state);
		return -EIO;
	}

	if (size > drv_data->tx.cfg.block_size || size < sizeof(uint32_t)) {
		LOG_ERR("This device can only write blocks up to %u bytes",
			drv_data->tx.cfg.block_size);
		return -EIO;
	}

	ret = k_msgq_put(&drv_data->tx_queue,
			 &buf,
			 SYS_TIMEOUT_MS(drv_data->tx.cfg.timeout));
	if (ret < 0) {
		return ret;
	}

	LOG_DBG("Queued TX %p", mem_block);

	/* Check if interrupt wanted to get next TX buffer before current buffer
	 * was queued. Do not move this check before queuing because doing so
	 * opens the possibility for a race condition between this function and
	 * data_handler() that is called in interrupt context.
	 */
	if (drv_data->state == I2S_STATE_RUNNING &&
	    drv_data->next_tx_buffer_needed) {
		nrfx_i2s_buffers_t next = { 0 };

		if (!get_next_tx_buffer(drv_data, &next)) {
			/* Log error because this is definitely unexpected.
			 * Do not return error because the caller is no longer
			 * responsible for releasing the buffer.
			 */
			LOG_ERR("Cannot reacquire queued buffer");
			return 0;
		}

		drv_data->next_tx_buffer_needed = false;

		LOG_DBG("Next TX %p", next.p_tx_buffer);

		if (!supply_next_buffers(drv_data, &next)) {
			return -EIO;
		}

	}

	return 0;
}
667 
/* Acquires the initial buffer(s) for the active direction(s) and starts the
 * transfer in the nrfx driver.
 *
 * On any failure, deinitializes the peripheral, releases the clock (if one
 * was requested), frees any buffers acquired here, and puts the driver into
 * the ERROR state. Returns 0 on success, -ENOMEM or -EIO otherwise.
 */
static int start_transfer(struct i2s_nrfx_drv_data *drv_data)
{
	nrfx_i2s_buffers_t initial_buffers = { 0 };
	int ret;

	if (drv_data->active_dir != I2S_DIR_RX && /* -> TX to be started */
	    !get_next_tx_buffer(drv_data, &initial_buffers)) {
		LOG_ERR("No TX buffer available");
		ret = -ENOMEM;
	} else if (drv_data->active_dir != I2S_DIR_TX && /* -> RX to be started */
		   !get_next_rx_buffer(drv_data, &initial_buffers)) {
		/* Failed to allocate next RX buffer */
		ret = -ENOMEM;
	} else {
		nrfx_err_t err;

		/* It is necessary to set buffer size here only for I2S_DIR_RX,
		 * because only then the get_next_tx_buffer() call in the if
		 * condition above gets short-circuited.
		 */
		if (drv_data->active_dir == I2S_DIR_RX) {
			initial_buffers.buffer_size =
				drv_data->rx.cfg.block_size / sizeof(uint32_t);
		}

		drv_data->last_tx_buffer = initial_buffers.p_tx_buffer;

		err = nrfx_i2s_start(drv_data->p_i2s, &initial_buffers, 0);
		if (err == NRFX_SUCCESS) {
			return 0;
		}

		LOG_ERR("Failed to start I2S transfer: 0x%08x", err);
		ret = -EIO;
	}

	/* Failure path: undo the initialization done in trigger_start(). */
	nrfx_i2s_uninit(drv_data->p_i2s);
	if (drv_data->request_clock) {
		(void)onoff_release(drv_data->clk_mgr);
	}

	if (initial_buffers.p_tx_buffer) {
		free_tx_buffer(drv_data, initial_buffers.p_tx_buffer);
	}
	if (initial_buffers.p_rx_buffer) {
		free_rx_buffer(drv_data, initial_buffers.p_rx_buffer);
	}

	drv_data->state = I2S_STATE_ERROR;
	return ret;
}
719 
/* Callback invoked by the onoff service once the requested HF clock is
 * running; starts the actual transfer unless it was dropped in the meantime.
 */
static void clock_started_callback(struct onoff_manager *mgr,
				   struct onoff_client *cli,
				   uint32_t state,
				   int res)
{
	struct i2s_nrfx_drv_data *drv_data =
		CONTAINER_OF(cli, struct i2s_nrfx_drv_data, clk_cli);

	if (drv_data->state != I2S_STATE_READY) {
		(void)start_transfer(drv_data);
		return;
	}

	/* READY here means the DROP command was triggered before the clock
	 * started. Do not start the transfer; just undo the initialization
	 * and release the clock.
	 */
	nrfx_i2s_uninit(drv_data->p_i2s);
	(void)onoff_release(drv_data->clk_mgr);
}
739 
/* Handles the START trigger: initializes the nrfx driver with the
 * configuration of the stream to be started and either requests the
 * required HF clock (the transfer then starts from
 * clock_started_callback()) or starts the transfer right away.
 * Returns 0 on success, -EIO or the start_transfer() error otherwise.
 */
static int trigger_start(const struct device *dev)
{
	struct i2s_nrfx_drv_data *drv_data = dev->data;
	const struct i2s_nrfx_drv_cfg *drv_cfg = dev->config;
	nrfx_err_t err;
	int ret;
	/* For I2S_DIR_BOTH the TX and RX nrfx configurations were verified
	 * to be identical in i2s_nrfx_trigger(), so either can be used.
	 */
	const nrfx_i2s_config_t *nrfx_cfg = (drv_data->active_dir == I2S_DIR_TX)
					    ? &drv_data->tx.nrfx_cfg
					    : &drv_data->rx.nrfx_cfg;

	err = nrfx_i2s_init(drv_data->p_i2s, nrfx_cfg, drv_cfg->data_handler);
	if (err != NRFX_SUCCESS) {
		LOG_ERR("Failed to initialize I2S: 0x%08x", err);
		return -EIO;
	}

	drv_data->state = I2S_STATE_RUNNING;

#if NRF_I2S_HAS_CLKCONFIG
	nrf_i2s_clk_configure(drv_cfg->i2s.p_reg,
			      drv_cfg->clk_src == ACLK ? NRF_I2S_CLKSRC_ACLK
						       : NRF_I2S_CLKSRC_PCLK32M,
			      false);
#endif

	/* If it is required to use certain HF clock, request it to be running
	 * first. If not, start the transfer directly.
	 */
	if (drv_data->request_clock) {
		sys_notify_init_callback(&drv_data->clk_cli.notify,
					 clock_started_callback);
		ret = onoff_request(drv_data->clk_mgr, &drv_data->clk_cli);
		if (ret < 0) {
			nrfx_i2s_uninit(drv_data->p_i2s);
			drv_data->state = I2S_STATE_READY;

			LOG_ERR("Failed to request clock: %d", ret);
			return -EIO;
		}
	} else {
		ret = start_transfer(drv_data);
		if (ret < 0) {
			return ret;
		}
	}

	return 0;
}
788 
/* Implements the i2s_trigger() API call.
 *
 * Validates that the requested command is permitted in the current state
 * and for the given direction(s), then performs the state transition
 * (START, STOP, DRAIN, DROP, or PREPARE). Returns 0 on success, -EIO when
 * the command is not allowed, or -EINVAL for an invalid command/direction.
 */
static int i2s_nrfx_trigger(const struct device *dev,
			    enum i2s_dir dir, enum i2s_trigger_cmd cmd)
{
	struct i2s_nrfx_drv_data *drv_data = dev->data;
	bool configured = false;
	bool cmd_allowed;

	/* This driver does not use the I2S_STATE_NOT_READY value.
	 * Instead, if a given stream is not configured, the respective
	 * flag (tx_configured or rx_configured) is cleared.
	 */
	if (dir == I2S_DIR_BOTH) {
		configured = drv_data->tx_configured && drv_data->rx_configured;
	} else if (dir == I2S_DIR_TX) {
		configured = drv_data->tx_configured;
	} else if (dir == I2S_DIR_RX) {
		configured = drv_data->rx_configured;
	}

	if (!configured) {
		LOG_ERR("Device is not configured");
		return -EIO;
	}

	/* Simultaneous TX and RX can only use one hardware configuration. */
	if (dir == I2S_DIR_BOTH &&
	    (memcmp(&drv_data->tx.nrfx_cfg,
		    &drv_data->rx.nrfx_cfg,
		    sizeof(drv_data->rx.nrfx_cfg)) != 0
	     ||
	     (drv_data->tx.cfg.block_size != drv_data->rx.cfg.block_size))) {
		LOG_ERR("TX and RX configurations are different");
		return -EIO;
	}

	switch (cmd) {
	case I2S_TRIGGER_START:
		cmd_allowed = (drv_data->state == I2S_STATE_READY);
		break;
	case I2S_TRIGGER_STOP:
	case I2S_TRIGGER_DRAIN:
		cmd_allowed = (drv_data->state == I2S_STATE_RUNNING);
		break;
	case I2S_TRIGGER_DROP:
		cmd_allowed = configured;
		break;
	case I2S_TRIGGER_PREPARE:
		cmd_allowed = (drv_data->state == I2S_STATE_ERROR);
		break;
	default:
		LOG_ERR("Invalid trigger: %d", cmd);
		return -EINVAL;
	}

	if (!cmd_allowed) {
		return -EIO;
	}

	/* For triggers applicable to the RUNNING state (i.e. STOP, DRAIN,
	 * and DROP), ensure that the command is applied to the streams
	 * that are currently active (this device cannot e.g. stop only TX
	 * without stopping RX).
	 */
	if (drv_data->state == I2S_STATE_RUNNING &&
	    drv_data->active_dir != dir) {
		LOG_ERR("Inappropriate trigger (%d/%d), active stream(s): %d",
			cmd, dir, drv_data->active_dir);
		return -EINVAL;
	}

	switch (cmd) {
	case I2S_TRIGGER_START:
		drv_data->stop = false;
		drv_data->discard_rx = false;
		drv_data->active_dir = dir;
		drv_data->next_tx_buffer_needed = false;
		return trigger_start(dev);

	case I2S_TRIGGER_STOP:
		drv_data->state = I2S_STATE_STOPPING;
		drv_data->stop = true;
		return 0;

	case I2S_TRIGGER_DRAIN:
		drv_data->state = I2S_STATE_STOPPING;
		/* If only RX is active, DRAIN is equivalent to STOP. */
		drv_data->stop = (drv_data->active_dir == I2S_DIR_RX);
		return 0;

	case I2S_TRIGGER_DROP:
		if (drv_data->state != I2S_STATE_READY) {
			drv_data->discard_rx = true;
			nrfx_i2s_stop(drv_data->p_i2s);
		}
		purge_queue(dev, dir);
		drv_data->state = I2S_STATE_READY;
		return 0;

	case I2S_TRIGGER_PREPARE:
		purge_queue(dev, dir);
		drv_data->state = I2S_STATE_READY;
		return 0;

	default:
		LOG_ERR("Invalid trigger: %d", cmd);
		return -EINVAL;
	}
}
896 
/* Obtains the onoff manager for the clock this instance needs to request:
 * HFCLKAUDIO when the ACLK source is selected (and present on the SoC),
 * the main HF clock otherwise.
 */
static void init_clock_manager(const struct device *dev)
{
	struct i2s_nrfx_drv_data *drv_data = dev->data;
	clock_control_subsys_t subsys;

#if NRF_CLOCK_HAS_HFCLKAUDIO
	const struct i2s_nrfx_drv_cfg *drv_cfg = dev->config;

	if (drv_cfg->clk_src == ACLK) {
		subsys = CLOCK_CONTROL_NRF_SUBSYS_HFAUDIO;
	} else
#endif
	{
		subsys = CLOCK_CONTROL_NRF_SUBSYS_HF;
	}

	drv_data->clk_mgr = z_nrf_clock_control_get_onoff(subsys);
	__ASSERT_NO_MSG(drv_data->clk_mgr != NULL);
}
916 
/* I2S driver API implementation, shared by all instances. */
static const struct i2s_driver_api i2s_nrf_drv_api = {
	.configure = i2s_nrfx_configure,
	.config_get = i2s_nrfx_config_get,
	.read = i2s_nrfx_read,
	.write = i2s_nrfx_write,
	.trigger = i2s_nrfx_trigger,
};
924 
/* Devicetree node and clock-source property helpers for instance `idx`. */
#define I2S(idx) DT_NODELABEL(i2s##idx)
#define I2S_CLK_SRC(idx) DT_STRING_TOKEN(I2S(idx), clock_source)

/* Defines everything needed for one I2S instance: message queue storage,
 * the instance-specific nrfx data handler, pin control and configuration
 * structures, driver data, the init function, and the device itself.
 * Build assertions verify that the ACLK clock source, if selected, is
 * actually available and properly configured in devicetree.
 */
#define I2S_NRFX_DEVICE(idx)						     \
	static struct i2s_buf tx_msgs##idx[CONFIG_I2S_NRFX_TX_BLOCK_COUNT];  \
	static struct i2s_buf rx_msgs##idx[CONFIG_I2S_NRFX_RX_BLOCK_COUNT];  \
	static void data_handler##idx(nrfx_i2s_buffers_t const *p_released,  \
				      uint32_t status)			     \
	{								     \
		data_handler(DEVICE_DT_GET(I2S(idx)), p_released, status);   \
	}								     \
	PINCTRL_DT_DEFINE(I2S(idx));					     \
	static const struct i2s_nrfx_drv_cfg i2s_nrfx_cfg##idx = {	     \
		.data_handler = data_handler##idx,			     \
		.i2s = NRFX_I2S_INSTANCE(idx),				     \
		.nrfx_def_cfg = NRFX_I2S_DEFAULT_CONFIG(		     \
			NRF_I2S_PIN_NOT_CONNECTED,			     \
			NRF_I2S_PIN_NOT_CONNECTED,			     \
			NRF_I2S_PIN_NOT_CONNECTED,			     \
			NRF_I2S_PIN_NOT_CONNECTED,			     \
			NRF_I2S_PIN_NOT_CONNECTED),			     \
		.nrfx_def_cfg.skip_gpio_cfg = true,			     \
		.nrfx_def_cfg.skip_psel_cfg = true,			     \
		.pcfg = PINCTRL_DT_DEV_CONFIG_GET(I2S(idx)),		     \
		.clk_src = I2S_CLK_SRC(idx),				     \
	};								     \
	static struct i2s_nrfx_drv_data i2s_nrfx_data##idx = {		     \
		.state = I2S_STATE_READY,				     \
		.p_i2s = &i2s_nrfx_cfg##idx.i2s				     \
	};								     \
	static int i2s_nrfx_init##idx(const struct device *dev)		     \
	{								     \
		IRQ_CONNECT(DT_IRQN(I2S(idx)), DT_IRQ(I2S(idx), priority),   \
			    nrfx_isr, nrfx_i2s_##idx##_irq_handler, 0);	     \
		const struct i2s_nrfx_drv_cfg *drv_cfg = dev->config;	     \
		int err = pinctrl_apply_state(drv_cfg->pcfg,		     \
					      PINCTRL_STATE_DEFAULT);	     \
		if (err < 0) {						     \
			return err;					     \
		}							     \
		k_msgq_init(&i2s_nrfx_data##idx.tx_queue,		     \
			    (char *)tx_msgs##idx, sizeof(struct i2s_buf),    \
			    ARRAY_SIZE(tx_msgs##idx));			     \
		k_msgq_init(&i2s_nrfx_data##idx.rx_queue,		     \
			    (char *)rx_msgs##idx, sizeof(struct i2s_buf),    \
			    ARRAY_SIZE(rx_msgs##idx));			     \
		init_clock_manager(dev);				     \
		return 0;						     \
	}								     \
	BUILD_ASSERT(I2S_CLK_SRC(idx) != ACLK || NRF_I2S_HAS_CLKCONFIG,	     \
		"Clock source ACLK is not available.");			     \
	BUILD_ASSERT(I2S_CLK_SRC(idx) != ACLK ||			     \
		     DT_NODE_HAS_PROP(DT_NODELABEL(clock),		     \
				      hfclkaudio_frequency),		     \
		"Clock source ACLK requires the hfclkaudio-frequency "	     \
		"property to be defined in the nordic,nrf-clock node.");     \
	DEVICE_DT_DEFINE(I2S(idx), i2s_nrfx_init##idx, NULL,		     \
			 &i2s_nrfx_data##idx, &i2s_nrfx_cfg##idx,	     \
			 POST_KERNEL, CONFIG_I2S_INIT_PRIORITY,		     \
			 &i2s_nrf_drv_api);
985 
/* Instantiate the driver for each I2S peripheral enabled in devicetree. */
#ifdef CONFIG_HAS_HW_NRF_I2S0
I2S_NRFX_DEVICE(0);
#endif

#ifdef CONFIG_HAS_HW_NRF_I2S20
I2S_NRFX_DEVICE(20);
#endif
993