/*
 * Copyright (c) 2017 comsuisse AG
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT atmel_sam_ssc

/** @file
 * @brief I2S bus (SSC) driver for Atmel SAM MCU family.
 *
 * Limitations:
 * - The TX and RX paths share a common bit clock divider and as a result
 *   cannot be configured independently. If the RX and TX paths are set to
 *   different bit clock frequencies, the latter setting will quietly
 *   override the former. We should return an error in such a case.
 * - DMA is used in simple single block transfer mode and as such is not able
 *   to handle high speed data. To support higher transfer speeds the DMA
 *   linked list mode should be used.
 */

#include <errno.h>
#include <string.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/i2s.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control/atmel_sam_pmc.h>
#include <soc.h>

#define LOG_DOMAIN dev_i2s_sam_ssc
#define LOG_LEVEL CONFIG_I2S_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(LOG_DOMAIN);

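/* Cache maintenance helpers: the DMA engine does not snoop the CPU data
 * cache, so on cached parts RX buffers must be invalidated before the DMA
 * engine writes to them and TX buffers cleaned before it reads from them.
 */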
#if __DCACHE_PRESENT == 1
#define DCACHE_INVALIDATE(addr, size) \
	SCB_InvalidateDCache_by_Addr((uint32_t *)addr, size)
#define DCACHE_CLEAN(addr, size) \
	SCB_CleanDCache_by_Addr((uint32_t *)addr, size)
#else
#define DCACHE_INVALIDATE(addr, size) {; }
#define DCACHE_CLEAN(addr, size) {; }
#endif

#define SAM_SSC_WORD_SIZE_BITS_MIN    2
#define SAM_SSC_WORD_SIZE_BITS_MAX   32
#define SAM_SSC_WORD_PER_FRAME_MIN    1
#define SAM_SSC_WORD_PER_FRAME_MAX   16

struct queue_item {
	void *mem_block;
	size_t size;
};

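/* The TX and RX block queues are ring buffers of queue_item entries. One
 * slot is always left unused so that head == tail unambiguously means
 * "empty"; the backing arrays are therefore sized one larger than the
 * configured block count.
 */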
/* Minimal ring buffer implementation */
struct ring_buf {
	struct queue_item *buf;
	uint16_t len;
	uint16_t head;
	uint16_t tail;
};

/* Device constant configuration parameters */
struct i2s_sam_dev_cfg {
	const struct device *dev_dma;
	Ssc *regs;
	void (*irq_config)(void);
	const struct atmel_sam_pmc_config clock_cfg;
	const struct pinctrl_dev_config *pcfg;
	uint8_t irq_id;
};

struct stream {
	int32_t state;
	struct k_sem sem;
	uint32_t dma_channel;
	uint8_t dma_perid;
	uint8_t word_size_bytes;
	bool last_block;
	struct i2s_config cfg;
	struct ring_buf mem_block_queue;
	void *mem_block;
	int (*stream_start)(struct stream *, Ssc *const,
			    const struct device *);
	void (*stream_disable)(struct stream *, Ssc *const,
			       const struct device *);
	void (*queue_drop)(struct stream *);
	int (*set_data_format)(const struct i2s_sam_dev_cfg *const,
			       const struct i2s_config *);
};

/* Device run time data */
struct i2s_sam_dev_data {
	struct stream rx;
	struct stream tx;
};

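/* Advance a ring buffer index by one, wrapping around to zero at max. */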
#define MODULO_INC(val, max) { val = (++val < max) ? val : 0; }

static const struct device *get_dev_from_dma_channel(uint32_t dma_channel);
static void dma_rx_callback(const struct device *, void *, uint32_t, int);
static void dma_tx_callback(const struct device *, void *, uint32_t, int);
static void rx_stream_disable(struct stream *, Ssc *const,
			      const struct device *);
static void tx_stream_disable(struct stream *, Ssc *const,
			      const struct device *);

/*
 * Get data from the queue
 */
static int queue_get(struct ring_buf *rb, void **mem_block, size_t *size)
{
	unsigned int key;

	key = irq_lock();

	if (rb->tail == rb->head) {
		/* Ring buffer is empty */
		irq_unlock(key);
		return -ENOMEM;
	}

	*mem_block = rb->buf[rb->tail].mem_block;
	*size = rb->buf[rb->tail].size;
	MODULO_INC(rb->tail, rb->len);

	irq_unlock(key);

	return 0;
}

/*
 * Put data in the queue
 */
static int queue_put(struct ring_buf *rb, void *mem_block, size_t size)
{
	uint16_t head_next;
	unsigned int key;

	key = irq_lock();

	head_next = rb->head;
	MODULO_INC(head_next, rb->len);

	if (head_next == rb->tail) {
		/* Ring buffer is full */
		irq_unlock(key);
		return -ENOMEM;
	}

	rb->buf[rb->head].mem_block = mem_block;
	rb->buf[rb->head].size = size;
	rb->head = head_next;

	irq_unlock(key);

	return 0;
}

static int reload_dma(const struct device *dev_dma, uint32_t channel,
		      void *src, void *dst, size_t size)
{
	int ret;

	ret = dma_reload(dev_dma, channel, (uint32_t)src, (uint32_t)dst, size);
	if (ret < 0) {
		return ret;
	}

	ret = dma_start(dev_dma, channel);

	return ret;
}

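/* Configure and start a single-block DMA transfer. Note that blk_cfg lives
 * on this function's stack, so the DMA driver is assumed to copy whatever
 * it needs out of it during dma_config() rather than keep the pointer.
 */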
static int start_dma(const struct device *dev_dma, uint32_t channel,
		     struct dma_config *cfg, void *src, void *dst,
		     uint32_t blk_size)
{
	struct dma_block_config blk_cfg;
	int ret;

	(void)memset(&blk_cfg, 0, sizeof(blk_cfg));
	blk_cfg.block_size = blk_size;
	blk_cfg.source_address = (uint32_t)src;
	blk_cfg.dest_address = (uint32_t)dst;

	cfg->head_block = &blk_cfg;

	ret = dma_config(dev_dma, channel, cfg);
	if (ret < 0) {
		return ret;
	}

	ret = dma_start(dev_dma, channel);

	return ret;
}

/* This function is executed in the interrupt context */
static void dma_rx_callback(const struct device *dma_dev, void *user_data,
			    uint32_t channel, int status)
{
	const struct device *dev = get_dev_from_dma_channel(channel);
	const struct i2s_sam_dev_cfg *const dev_cfg = dev->config;
	struct i2s_sam_dev_data *const dev_data = dev->data;
	Ssc *const ssc = dev_cfg->regs;
	struct stream *stream = &dev_data->rx;
	int ret;

	ARG_UNUSED(user_data);
	__ASSERT_NO_MSG(stream->mem_block != NULL);

	/* Stop reception if there was an error */
	if (stream->state == I2S_STATE_ERROR) {
		goto rx_disable;
	}

	/* All block data received */
	ret = queue_put(&stream->mem_block_queue, stream->mem_block,
			stream->cfg.block_size);
	if (ret < 0) {
		stream->state = I2S_STATE_ERROR;
		goto rx_disable;
	}
	stream->mem_block = NULL;
	k_sem_give(&stream->sem);

	/* Stop reception if requested */
	if (stream->state == I2S_STATE_STOPPING) {
		stream->state = I2S_STATE_READY;
		goto rx_disable;
	}

	/* Prepare to receive the next data block */
	ret = k_mem_slab_alloc(stream->cfg.mem_slab, &stream->mem_block,
			       K_NO_WAIT);
	if (ret < 0) {
		stream->state = I2S_STATE_ERROR;
		goto rx_disable;
	}

	/* Ensure cache coherency before the DMA engine writes to the block */
	DCACHE_INVALIDATE(stream->mem_block, stream->cfg.block_size);

	ret = reload_dma(dev_cfg->dev_dma, stream->dma_channel,
			 (void *)&(ssc->SSC_RHR), stream->mem_block,
			 stream->cfg.block_size);
	if (ret < 0) {
		LOG_DBG("Failed to reload RX DMA transfer: %d", ret);
		goto rx_disable;
	}

	return;

rx_disable:
	rx_stream_disable(stream, ssc, dev_cfg->dev_dma);
}

/* This function is executed in the interrupt context */
static void dma_tx_callback(const struct device *dma_dev, void *user_data,
			    uint32_t channel, int status)
{
	const struct device *dev = get_dev_from_dma_channel(channel);
	const struct i2s_sam_dev_cfg *const dev_cfg = dev->config;
	struct i2s_sam_dev_data *const dev_data = dev->data;
	Ssc *const ssc = dev_cfg->regs;
	struct stream *stream = &dev_data->tx;
	size_t mem_block_size;
	int ret;

	ARG_UNUSED(user_data);
	__ASSERT_NO_MSG(stream->mem_block != NULL);

	/* All block data sent */
	k_mem_slab_free(stream->cfg.mem_slab, stream->mem_block);
	stream->mem_block = NULL;

	/* Stop transmission if there was an error */
	if (stream->state == I2S_STATE_ERROR) {
		LOG_DBG("TX error detected");
		goto tx_disable;
	}

	/* Stop transmission if requested */
	if (stream->last_block) {
		stream->state = I2S_STATE_READY;
		goto tx_disable;
	}

	/* Prepare to send the next data block */
	ret = queue_get(&stream->mem_block_queue, &stream->mem_block,
			&mem_block_size);
	if (ret < 0) {
		if (stream->state == I2S_STATE_STOPPING) {
			stream->state = I2S_STATE_READY;
		} else {
			stream->state = I2S_STATE_ERROR;
		}
		goto tx_disable;
	}
	k_sem_give(&stream->sem);

	/* Ensure cache coherency before the DMA engine reads from the block */
	DCACHE_CLEAN(stream->mem_block, mem_block_size);

	ret = reload_dma(dev_cfg->dev_dma, stream->dma_channel,
			 stream->mem_block, (void *)&(ssc->SSC_THR),
			 mem_block_size);
	if (ret < 0) {
		LOG_DBG("Failed to reload TX DMA transfer: %d", ret);
		goto tx_disable;
	}

	return;

tx_disable:
	tx_stream_disable(stream, ssc, dev_cfg->dev_dma);
}

static int set_rx_data_format(const struct i2s_sam_dev_cfg *const dev_cfg,
			      const struct i2s_config *i2s_cfg)
{
	Ssc *const ssc = dev_cfg->regs;
	const bool pin_rk_en = IS_ENABLED(CONFIG_I2S_SAM_SSC_0_PIN_RK_EN);
	const bool pin_rf_en = IS_ENABLED(CONFIG_I2S_SAM_SSC_0_PIN_RF_EN);
	uint8_t word_size_bits = i2s_cfg->word_size;
	uint8_t num_words = i2s_cfg->channels;
	uint8_t fslen = 0U;
	uint32_t ssc_rcmr = 0U;
	uint32_t ssc_rfmr = 0U;
	bool frame_clk_master = !(i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE);

	switch (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) {

	case I2S_FMT_DATA_FORMAT_I2S:
		num_words = 2U;
		fslen = word_size_bits - 1;

		ssc_rcmr = SSC_RCMR_CKI
			   | (pin_rf_en ? SSC_RCMR_START_RF_FALLING : 0)
			   | SSC_RCMR_STTDLY(1);

		ssc_rfmr = (pin_rf_en && frame_clk_master
			    ? SSC_RFMR_FSOS_NEGATIVE : SSC_RFMR_FSOS_NONE);
		break;

	case I2S_FMT_DATA_FORMAT_PCM_SHORT:
		ssc_rcmr = (pin_rf_en ? SSC_RCMR_START_RF_FALLING : 0)
			   | SSC_RCMR_STTDLY(0);

		ssc_rfmr = (pin_rf_en && frame_clk_master
			    ? SSC_RFMR_FSOS_POSITIVE : SSC_RFMR_FSOS_NONE);
		break;

	case I2S_FMT_DATA_FORMAT_PCM_LONG:
		fslen = num_words * word_size_bits / 2U - 1;

		ssc_rcmr = (pin_rf_en ? SSC_RCMR_START_RF_RISING : 0)
			   | SSC_RCMR_STTDLY(0);

		ssc_rfmr = (pin_rf_en && frame_clk_master
			    ? SSC_RFMR_FSOS_POSITIVE : SSC_RFMR_FSOS_NONE);
		break;

	case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED:
		fslen = num_words * word_size_bits / 2U - 1;

		ssc_rcmr = SSC_RCMR_CKI
			   | (pin_rf_en ? SSC_RCMR_START_RF_RISING : 0)
			   | SSC_RCMR_STTDLY(0);

		ssc_rfmr = (pin_rf_en && frame_clk_master
			    ? SSC_RFMR_FSOS_POSITIVE : SSC_RFMR_FSOS_NONE);
		break;

	default:
		LOG_ERR("Unsupported I2S data format");
		return -EINVAL;
	}

	if (pin_rk_en) {
		ssc_rcmr |= ((i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE)
			     ? SSC_RCMR_CKS_RK : SSC_RCMR_CKS_MCK)
			    | ((i2s_cfg->options & I2S_OPT_BIT_CLK_GATED)
			       ? SSC_RCMR_CKO_TRANSFER : SSC_RCMR_CKO_CONTINUOUS);
	} else {
		ssc_rcmr |= SSC_RCMR_CKS_TK
			    | SSC_RCMR_CKO_NONE;
	}
	/* The SSC_RCMR.PERIOD bit field does not support setting the frame
	 * period with single bit resolution. If the required frame period is
	 * an odd number of bits, round it up by one bit.
	 */
	ssc_rcmr |= (pin_rf_en ? 0 : SSC_RCMR_START_TRANSMIT)
		    | SSC_RCMR_PERIOD((num_words * word_size_bits + 1) / 2U - 1);

	/* Receive Clock Mode Register */
	ssc->SSC_RCMR = ssc_rcmr;

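	/* The frame sync length value is split across two register fields:
	 * FSLEN takes the low four bits and FSLEN_EXT the remaining high
	 * bits.
	 */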
	ssc_rfmr |= SSC_RFMR_DATLEN(word_size_bits - 1)
		    | ((i2s_cfg->format & I2S_FMT_DATA_ORDER_LSB)
		       ? 0 : SSC_RFMR_MSBF)
		    | SSC_RFMR_DATNB(num_words - 1)
		    | SSC_RFMR_FSLEN(fslen)
		    | SSC_RFMR_FSLEN_EXT(fslen >> 4);

	/* Receive Frame Mode Register */
	ssc->SSC_RFMR = ssc_rfmr;

	return 0;
}

static int set_tx_data_format(const struct i2s_sam_dev_cfg *const dev_cfg,
			      const struct i2s_config *i2s_cfg)
{
	Ssc *const ssc = dev_cfg->regs;
	uint8_t word_size_bits = i2s_cfg->word_size;
	uint8_t num_words = i2s_cfg->channels;
	uint8_t fslen = 0U;
	uint32_t ssc_tcmr = 0U;
	uint32_t ssc_tfmr = 0U;

	switch (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) {

	case I2S_FMT_DATA_FORMAT_I2S:
		num_words = 2U;
		fslen = word_size_bits - 1;

		ssc_tcmr = SSC_TCMR_START_TF_FALLING
			   | SSC_TCMR_STTDLY(1);

		ssc_tfmr = SSC_TFMR_FSOS_NEGATIVE;
		break;

	case I2S_FMT_DATA_FORMAT_PCM_SHORT:
		ssc_tcmr = SSC_TCMR_CKI
			   | SSC_TCMR_START_TF_FALLING
			   | SSC_TCMR_STTDLY(0);

		ssc_tfmr = SSC_TFMR_FSOS_POSITIVE;
		break;

	case I2S_FMT_DATA_FORMAT_PCM_LONG:
		fslen = num_words * word_size_bits / 2U - 1;

		ssc_tcmr = SSC_TCMR_CKI
			   | SSC_TCMR_START_TF_RISING
			   | SSC_TCMR_STTDLY(0);

		ssc_tfmr = SSC_TFMR_FSOS_POSITIVE;
		break;

	case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED:
		fslen = num_words * word_size_bits / 2U - 1;

		ssc_tcmr = SSC_TCMR_START_TF_RISING
			   | SSC_TCMR_STTDLY(0);

		ssc_tfmr = SSC_TFMR_FSOS_POSITIVE;
		break;

	default:
		LOG_ERR("Unsupported I2S data format");
		return -EINVAL;
	}

	/* The SSC_TCMR.PERIOD bit field does not support setting the frame
	 * period with single bit resolution. If the required frame period is
	 * an odd number of bits, round it up by one bit.
	 */
	ssc_tcmr |= ((i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE)
		     ? SSC_TCMR_CKS_TK : SSC_TCMR_CKS_MCK)
		    | ((i2s_cfg->options & I2S_OPT_BIT_CLK_GATED)
		       ? SSC_TCMR_CKO_TRANSFER : SSC_TCMR_CKO_CONTINUOUS)
		    | SSC_TCMR_PERIOD((num_words * word_size_bits + 1) / 2U - 1);

	/* Transmit Clock Mode Register */
	ssc->SSC_TCMR = ssc_tcmr;

	if (i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE) {
		ssc_tfmr &= ~SSC_TFMR_FSOS_Msk;
		ssc_tfmr |= SSC_TFMR_FSOS_NONE;
	}

	ssc_tfmr |= SSC_TFMR_DATLEN(word_size_bits - 1)
		    | ((i2s_cfg->format & I2S_FMT_DATA_ORDER_LSB)
		       ? 0 : SSC_TFMR_MSBF)
		    | SSC_TFMR_DATNB(num_words - 1)
		    | SSC_TFMR_FSLEN(fslen)
		    | SSC_TFMR_FSLEN_EXT(fslen >> 4);

	/* Transmit Frame Mode Register */
	ssc->SSC_TFMR = ssc_tfmr;

	return 0;
}

/* Calculate number of bytes required to store a word of bit_size length */
static uint8_t get_word_size_bytes(uint8_t bit_size)
{
	uint8_t byte_size_min = (bit_size + 7) / 8U;
	uint8_t byte_size;

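	/* The DMA engine transfers units of 1, 2 or 4 bytes, so words of
	 * 17..24 bits are stored in 4 bytes rather than 3.
	 */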
	byte_size = (byte_size_min == 3U) ? 4 : byte_size_min;

	return byte_size;
}

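/* Program the bit clock divider. The SSC derives the bit clock from the
 * master clock as MCK / (2 * SSC_CMR.DIV), hence the divider computed
 * below; DIV is a 12-bit field, which bounds the valid range.
 */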
static int bit_clock_set(Ssc *const ssc, uint32_t bit_clk_freq)
{
	uint32_t clk_div = SOC_ATMEL_SAM_MCK_FREQ_HZ / bit_clk_freq / 2U;

	if (clk_div == 0U || clk_div >= (1 << 12)) {
		LOG_ERR("Invalid bit clock frequency");
		return -EINVAL;
	}

	ssc->SSC_CMR = clk_div;

	LOG_DBG("freq = %u", bit_clk_freq);

	return 0;
}

static const struct i2s_config *i2s_sam_config_get(const struct device *dev,
						   enum i2s_dir dir)
{
	struct i2s_sam_dev_data *const dev_data = dev->data;
	struct stream *stream;

	if (dir == I2S_DIR_RX) {
		stream = &dev_data->rx;
	} else {
		stream = &dev_data->tx;
	}

	if (stream->state == I2S_STATE_NOT_READY) {
		return NULL;
	}

	return &stream->cfg;
}

static int i2s_sam_configure(const struct device *dev, enum i2s_dir dir,
			     const struct i2s_config *i2s_cfg)
{
	const struct i2s_sam_dev_cfg *const dev_cfg = dev->config;
	struct i2s_sam_dev_data *const dev_data = dev->data;
	Ssc *const ssc = dev_cfg->regs;
	uint8_t num_words = i2s_cfg->channels;
	uint8_t word_size_bits = i2s_cfg->word_size;
	uint32_t bit_clk_freq;
	struct stream *stream;
	int ret;

	if (dir == I2S_DIR_RX) {
		stream = &dev_data->rx;
	} else if (dir == I2S_DIR_TX) {
		stream = &dev_data->tx;
	} else if (dir == I2S_DIR_BOTH) {
		return -ENOSYS;
	} else {
		LOG_ERR("Either RX or TX direction must be selected");
		return -EINVAL;
	}

	if (stream->state != I2S_STATE_NOT_READY &&
	    stream->state != I2S_STATE_READY) {
		LOG_ERR("invalid state");
		return -EINVAL;
	}

	if (i2s_cfg->frame_clk_freq == 0U) {
		stream->queue_drop(stream);
		(void)memset(&stream->cfg, 0, sizeof(struct i2s_config));
		stream->state = I2S_STATE_NOT_READY;
		return 0;
	}

	if (i2s_cfg->format & I2S_FMT_FRAME_CLK_INV) {
		LOG_ERR("Frame clock inversion is not implemented");
		LOG_ERR("Please submit a patch");
		return -EINVAL;
	}

	if (i2s_cfg->format & I2S_FMT_BIT_CLK_INV) {
		LOG_ERR("Bit clock inversion is not implemented");
		LOG_ERR("Please submit a patch");
		return -EINVAL;
	}

	if (word_size_bits < SAM_SSC_WORD_SIZE_BITS_MIN ||
	    word_size_bits > SAM_SSC_WORD_SIZE_BITS_MAX) {
		LOG_ERR("Unsupported I2S word size");
		return -EINVAL;
	}

	if (num_words < SAM_SSC_WORD_PER_FRAME_MIN ||
	    num_words > SAM_SSC_WORD_PER_FRAME_MAX) {
		LOG_ERR("Unsupported number of words per frame");
		return -EINVAL;
	}

	memcpy(&stream->cfg, i2s_cfg, sizeof(struct i2s_config));

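	/* The bit clock has to run at frame rate times bits per frame,
	 * e.g. 48 kHz * 16 bits * 2 channels = 1.536 MHz.
	 */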
	bit_clk_freq = i2s_cfg->frame_clk_freq * word_size_bits * num_words;
	ret = bit_clock_set(ssc, bit_clk_freq);
	if (ret < 0) {
		return ret;
	}

	ret = stream->set_data_format(dev_cfg, i2s_cfg);
	if (ret < 0) {
		return ret;
	}

	/* Set up DMA channel parameters */
	stream->word_size_bytes = get_word_size_bytes(word_size_bits);

	if (i2s_cfg->options & I2S_OPT_LOOPBACK) {
		ssc->SSC_RFMR |= SSC_RFMR_LOOP;
	}

	stream->state = I2S_STATE_READY;

	return 0;
}

static int rx_stream_start(struct stream *stream, Ssc *const ssc,
			   const struct device *dev_dma)
{
	int ret;

	ret = k_mem_slab_alloc(stream->cfg.mem_slab, &stream->mem_block,
			       K_NO_WAIT);
	if (ret < 0) {
		return ret;
	}

	/* Workaround for a hardware bug: DMA engine will read first data
	 * item even if SSC_SR.RXEN (Receive Enable) is not set. An extra read
	 * before enabling DMA engine sets hardware FSM in the correct state.
	 */
	(void)ssc->SSC_RHR;

	struct dma_config dma_cfg = {
		.source_data_size = stream->word_size_bytes,
		.dest_data_size = stream->word_size_bytes,
		.block_count = 1,
		.dma_slot = stream->dma_perid,
		.channel_direction = PERIPHERAL_TO_MEMORY,
		.source_burst_length = 1,
		.dest_burst_length = 1,
		.dma_callback = dma_rx_callback,
	};

	ret = start_dma(dev_dma, stream->dma_channel, &dma_cfg,
			(void *)&(ssc->SSC_RHR), stream->mem_block,
			stream->cfg.block_size);
	if (ret < 0) {
		LOG_ERR("Failed to start RX DMA transfer: %d", ret);
		return ret;
	}

	/* Clear status register */
	(void)ssc->SSC_SR;

	ssc->SSC_IER = SSC_IER_OVRUN;

	ssc->SSC_CR = SSC_CR_RXEN;

	return 0;
}

static int tx_stream_start(struct stream *stream, Ssc *const ssc,
			   const struct device *dev_dma)
{
	size_t mem_block_size;
	int ret;

	ret = queue_get(&stream->mem_block_queue, &stream->mem_block,
			&mem_block_size);
	if (ret < 0) {
		return ret;
	}
	k_sem_give(&stream->sem);

	/* Workaround for a hardware bug: DMA engine will transfer first data
	 * item even if SSC_SR.TXEN (Transmit Enable) is not set. An extra write
	 * before enabling DMA engine sets hardware FSM in the correct state.
	 * This data item will not be output on I2S interface.
	 */
	ssc->SSC_THR = 0;

	struct dma_config dma_cfg = {
		.source_data_size = stream->word_size_bytes,
		.dest_data_size = stream->word_size_bytes,
		.block_count = 1,
		.dma_slot = stream->dma_perid,
		.channel_direction = MEMORY_TO_PERIPHERAL,
		.source_burst_length = 1,
		.dest_burst_length = 1,
		.dma_callback = dma_tx_callback,
	};

	/* Ensure cache coherency before the DMA engine reads from the block */
	DCACHE_CLEAN(stream->mem_block, mem_block_size);

	ret = start_dma(dev_dma, stream->dma_channel, &dma_cfg,
			stream->mem_block, (void *)&(ssc->SSC_THR),
			mem_block_size);
	if (ret < 0) {
		LOG_ERR("Failed to start TX DMA transfer: %d", ret);
		return ret;
	}

	/* Clear status register */
	(void)ssc->SSC_SR;

	ssc->SSC_IER = SSC_IER_TXEMPTY;

	ssc->SSC_CR = SSC_CR_TXEN;

	return 0;
}

static void rx_stream_disable(struct stream *stream, Ssc *const ssc,
			      const struct device *dev_dma)
{
	ssc->SSC_CR = SSC_CR_RXDIS;
	ssc->SSC_IDR = SSC_IDR_OVRUN;
	dma_stop(dev_dma, stream->dma_channel);
	if (stream->mem_block != NULL) {
		k_mem_slab_free(stream->cfg.mem_slab, stream->mem_block);
		stream->mem_block = NULL;
	}
}

static void tx_stream_disable(struct stream *stream, Ssc *const ssc,
			      const struct device *dev_dma)
{
	ssc->SSC_CR = SSC_CR_TXDIS;
	ssc->SSC_IDR = SSC_IDR_TXEMPTY;
	dma_stop(dev_dma, stream->dma_channel);
	if (stream->mem_block != NULL) {
		k_mem_slab_free(stream->cfg.mem_slab, stream->mem_block);
		stream->mem_block = NULL;
	}
}

static void rx_queue_drop(struct stream *stream)
{
	size_t size;
	void *mem_block;

	while (queue_get(&stream->mem_block_queue, &mem_block, &size) == 0) {
		k_mem_slab_free(stream->cfg.mem_slab, mem_block);
	}

	k_sem_reset(&stream->sem);
}

static void tx_queue_drop(struct stream *stream)
{
	size_t size;
	void *mem_block;
	unsigned int n = 0U;

	while (queue_get(&stream->mem_block_queue, &mem_block, &size) == 0) {
		k_mem_slab_free(stream->cfg.mem_slab, mem_block);
		n++;
	}

	for (; n > 0; n--) {
		k_sem_give(&stream->sem);
	}
}

static int i2s_sam_trigger(const struct device *dev, enum i2s_dir dir,
			   enum i2s_trigger_cmd cmd)
{
	const struct i2s_sam_dev_cfg *const dev_cfg = dev->config;
	struct i2s_sam_dev_data *const dev_data = dev->data;
	Ssc *const ssc = dev_cfg->regs;
	struct stream *stream;
	unsigned int key;
	int ret;

	if (dir == I2S_DIR_RX) {
		stream = &dev_data->rx;
	} else if (dir == I2S_DIR_TX) {
		stream = &dev_data->tx;
	} else if (dir == I2S_DIR_BOTH) {
		return -ENOSYS;
	} else {
		LOG_ERR("Either RX or TX direction must be selected");
		return -EINVAL;
	}

	switch (cmd) {
	case I2S_TRIGGER_START:
		if (stream->state != I2S_STATE_READY) {
			LOG_DBG("START trigger: invalid state");
			return -EIO;
		}

		__ASSERT_NO_MSG(stream->mem_block == NULL);

		ret = stream->stream_start(stream, ssc, dev_cfg->dev_dma);
		if (ret < 0) {
			LOG_DBG("START trigger failed %d", ret);
			return ret;
		}

		stream->state = I2S_STATE_RUNNING;
		stream->last_block = false;
		break;

	case I2S_TRIGGER_STOP:
		key = irq_lock();
		if (stream->state != I2S_STATE_RUNNING) {
			irq_unlock(key);
			LOG_DBG("STOP trigger: invalid state");
			return -EIO;
		}
		stream->state = I2S_STATE_STOPPING;
		irq_unlock(key);
		stream->last_block = true;
		break;

	case I2S_TRIGGER_DRAIN:
		key = irq_lock();
		if (stream->state != I2S_STATE_RUNNING) {
			irq_unlock(key);
			LOG_DBG("DRAIN trigger: invalid state");
			return -EIO;
		}
		stream->state = I2S_STATE_STOPPING;
		irq_unlock(key);
		break;

	case I2S_TRIGGER_DROP:
		if (stream->state == I2S_STATE_NOT_READY) {
			LOG_DBG("DROP trigger: invalid state");
			return -EIO;
		}
		stream->stream_disable(stream, ssc, dev_cfg->dev_dma);
		stream->queue_drop(stream);
		stream->state = I2S_STATE_READY;
		break;

	case I2S_TRIGGER_PREPARE:
		if (stream->state != I2S_STATE_ERROR) {
			LOG_DBG("PREPARE trigger: invalid state");
			return -EIO;
		}
		stream->state = I2S_STATE_READY;
		stream->queue_drop(stream);
		break;

	default:
		LOG_ERR("Unsupported trigger command");
		return -EINVAL;
	}

	return 0;
}

static int i2s_sam_read(const struct device *dev, void **mem_block,
			size_t *size)
{
	struct i2s_sam_dev_data *const dev_data = dev->data;
	int ret;

	if (dev_data->rx.state == I2S_STATE_NOT_READY) {
		LOG_DBG("invalid state");
		return -EIO;
	}

	if (dev_data->rx.state != I2S_STATE_ERROR) {
		ret = k_sem_take(&dev_data->rx.sem,
				 SYS_TIMEOUT_MS(dev_data->rx.cfg.timeout));
		if (ret < 0) {
			return ret;
		}
	}

	/* Get data from the beginning of RX queue */
	ret = queue_get(&dev_data->rx.mem_block_queue, mem_block, size);
	if (ret < 0) {
		return -EIO;
	}

	return 0;
}

static int i2s_sam_write(const struct device *dev, void *mem_block,
			 size_t size)
{
	struct i2s_sam_dev_data *const dev_data = dev->data;
	int ret;

	if (dev_data->tx.state != I2S_STATE_RUNNING &&
	    dev_data->tx.state != I2S_STATE_READY) {
		LOG_DBG("invalid state");
		return -EIO;
	}

	ret = k_sem_take(&dev_data->tx.sem,
			 SYS_TIMEOUT_MS(dev_data->tx.cfg.timeout));
	if (ret < 0) {
		return ret;
	}

	/* Add data to the end of the TX queue. The semaphore taken above
	 * guarantees a free slot, so queue_put() cannot fail here.
	 */
	queue_put(&dev_data->tx.mem_block_queue, mem_block, size);

	return 0;
}

static void i2s_sam_isr(const struct device *dev)
{
	const struct i2s_sam_dev_cfg *const dev_cfg = dev->config;
	struct i2s_sam_dev_data *const dev_data = dev->data;
	Ssc *const ssc = dev_cfg->regs;
	uint32_t isr_status;

	/* Retrieve interrupt status */
	isr_status = ssc->SSC_SR & ssc->SSC_IMR;

	/* Check for RX buffer overrun */
	if (isr_status & SSC_SR_OVRUN) {
		dev_data->rx.state = I2S_STATE_ERROR;
		/* Disable interrupt */
		ssc->SSC_IDR = SSC_IDR_OVRUN;
		LOG_DBG("RX buffer overrun error");
	}
	/* Check for TX buffer underrun */
	if (isr_status & SSC_SR_TXEMPTY) {
		dev_data->tx.state = I2S_STATE_ERROR;
		/* Disable interrupt */
		ssc->SSC_IDR = SSC_IDR_TXEMPTY;
		LOG_DBG("TX buffer underrun error");
	}
}

static int i2s_sam_initialize(const struct device *dev)
{
	const struct i2s_sam_dev_cfg *const dev_cfg = dev->config;
	struct i2s_sam_dev_data *const dev_data = dev->data;
	Ssc *const ssc = dev_cfg->regs;
	int ret;

	/* Configure interrupts */
	dev_cfg->irq_config();

	/* Initialize semaphores */
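	/* The RX semaphore counts filled blocks and starts at zero; the TX
	 * semaphore counts free queue slots and starts full.
	 */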
	k_sem_init(&dev_data->rx.sem, 0, CONFIG_I2S_SAM_SSC_RX_BLOCK_COUNT);
	k_sem_init(&dev_data->tx.sem, CONFIG_I2S_SAM_SSC_TX_BLOCK_COUNT,
		   CONFIG_I2S_SAM_SSC_TX_BLOCK_COUNT);

	if (!device_is_ready(dev_cfg->dev_dma)) {
		LOG_ERR("%s device not ready", dev_cfg->dev_dma->name);
		return -ENODEV;
	}

	/* Connect pins to the peripheral */
	ret = pinctrl_apply_state(dev_cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		return ret;
	}

	/* Enable SSC clock in PMC */
	(void)clock_control_on(SAM_DT_PMC_CONTROLLER,
			       (clock_control_subsys_t)&dev_cfg->clock_cfg);

	/* Reset the module, disable receiver & transmitter */
	ssc->SSC_CR = SSC_CR_RXDIS | SSC_CR_TXDIS | SSC_CR_SWRST;

	/* Enable module's IRQ */
	irq_enable(dev_cfg->irq_id);

	LOG_INF("Device %s initialized", dev->name);

	return 0;
}

static DEVICE_API(i2s, i2s_sam_driver_api) = {
	.configure = i2s_sam_configure,
	.config_get = i2s_sam_config_get,
	.read = i2s_sam_read,
	.write = i2s_sam_write,
	.trigger = i2s_sam_trigger,
};

/* I2S0 */

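/* Only a single SSC instance is supported by this driver, so any DMA
 * channel maps back to that one device.
 */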
static const struct device *get_dev_from_dma_channel(uint32_t dma_channel)
{
	return &DEVICE_DT_NAME_GET(DT_DRV_INST(0));
}

static void i2s0_sam_irq_config(void)
{
	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), i2s_sam_isr,
		    DEVICE_DT_INST_GET(0), 0);
}

PINCTRL_DT_INST_DEFINE(0);

static const struct i2s_sam_dev_cfg i2s0_sam_config = {
	.dev_dma = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(0, tx)),
	.regs = (Ssc *)DT_INST_REG_ADDR(0),
	.irq_config = i2s0_sam_irq_config,
	.clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(0),
	.irq_id = DT_INST_IRQN(0),
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
};

struct queue_item rx_0_ring_buf[CONFIG_I2S_SAM_SSC_RX_BLOCK_COUNT + 1];
struct queue_item tx_0_ring_buf[CONFIG_I2S_SAM_SSC_TX_BLOCK_COUNT + 1];

static struct i2s_sam_dev_data i2s0_sam_data = {
	.rx = {
		.dma_channel = DT_INST_DMAS_CELL_BY_NAME(0, rx, channel),
		.dma_perid = DT_INST_DMAS_CELL_BY_NAME(0, rx, perid),
		.mem_block_queue.buf = rx_0_ring_buf,
		.mem_block_queue.len = ARRAY_SIZE(rx_0_ring_buf),
		.stream_start = rx_stream_start,
		.stream_disable = rx_stream_disable,
		.queue_drop = rx_queue_drop,
		.set_data_format = set_rx_data_format,
	},
	.tx = {
		.dma_channel = DT_INST_DMAS_CELL_BY_NAME(0, tx, channel),
		.dma_perid = DT_INST_DMAS_CELL_BY_NAME(0, tx, perid),
		.mem_block_queue.buf = tx_0_ring_buf,
		.mem_block_queue.len = ARRAY_SIZE(tx_0_ring_buf),
		.stream_start = tx_stream_start,
		.stream_disable = tx_stream_disable,
		.queue_drop = tx_queue_drop,
		.set_data_format = set_tx_data_format,
	},
};

DEVICE_DT_INST_DEFINE(0, &i2s_sam_initialize, NULL,
		    &i2s0_sam_data, &i2s0_sam_config, POST_KERNEL,
		    CONFIG_I2S_INIT_PRIORITY, &i2s_sam_driver_api);