/*
 * Copyright (c) 2021 BrainCo Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT gd_gd32_spi

#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/gd32.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/reset.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#ifdef CONFIG_SPI_GD32_DMA
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/dma/dma_gd32.h>
#endif

#include <gd32_spi.h>

#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(spi_gd32);

#include "spi_context.h"

/* SPI error status mask. */
#define SPI_GD32_ERR_MASK	(SPI_STAT_RXORERR | SPI_STAT_CONFERR | SPI_STAT_CRCERR)

#define GD32_SPI_PSC_MAX	0x7U

#ifdef CONFIG_SPI_GD32_DMA

enum spi_gd32_dma_direction {
	RX = 0,
	TX,
	NUM_OF_DIRECTION
};

struct spi_gd32_dma_config {
	const struct device *dev;
	uint32_t channel;
	uint32_t config;
	uint32_t slot;
	uint32_t fifo_threshold;
};

struct spi_gd32_dma_data {
	struct dma_config config;
	struct dma_block_config block;
	uint32_t count;
};

#endif

struct spi_gd32_config {
	uint32_t reg;
	uint16_t clkid;
	struct reset_dt_spec reset;
	const struct pinctrl_dev_config *pcfg;
#ifdef CONFIG_SPI_GD32_DMA
	const struct spi_gd32_dma_config dma[NUM_OF_DIRECTION];
#endif
#ifdef CONFIG_SPI_GD32_INTERRUPT
	void (*irq_configure)();
#endif
};

struct spi_gd32_data {
	struct spi_context ctx;
#ifdef CONFIG_SPI_GD32_DMA
	struct spi_gd32_dma_data dma[NUM_OF_DIRECTION];
#endif
};

#ifdef CONFIG_SPI_GD32_DMA

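/*
 * Scratch words for DMA transfers that have no user buffer in one
 * direction: the TX channel reads from dummy_tx and the RX channel
 * writes to dummy_rx, with the address adjustment set to "no change".
 */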
static uint32_t dummy_tx;
static uint32_t dummy_rx;

static bool spi_gd32_dma_enabled(const struct device *dev)
{
	const struct spi_gd32_config *cfg = dev->config;

	if (cfg->dma[TX].dev && cfg->dma[RX].dev) {
		return true;
	}

	return false;
}

static size_t spi_gd32_dma_enabled_num(const struct device *dev)
{
	return spi_gd32_dma_enabled(dev) ? 2 : 0;
}

#endif

static int spi_gd32_get_err(const struct spi_gd32_config *cfg)
{
	uint32_t stat = SPI_STAT(cfg->reg);

	if (stat & SPI_GD32_ERR_MASK) {
		LOG_ERR("spi%u error status detected, err = %u",
			cfg->reg, stat & (uint32_t)SPI_GD32_ERR_MASK);

		return -EIO;
	}

	return 0;
}

static bool spi_gd32_transfer_ongoing(struct spi_gd32_data *data)
{
	return spi_context_tx_on(&data->ctx) ||
	       spi_context_rx_on(&data->ctx);
}

static int spi_gd32_configure(const struct device *dev,
			      const struct spi_config *config)
{
	struct spi_gd32_data *data = dev->data;
	const struct spi_gd32_config *cfg = dev->config;
	uint32_t bus_freq;

	if (spi_context_configured(&data->ctx, config)) {
		return 0;
	}

	if (SPI_OP_MODE_GET(config->operation) == SPI_OP_MODE_SLAVE) {
		LOG_ERR("Slave mode not supported");
		return -ENOTSUP;
	}

	SPI_CTL0(cfg->reg) &= ~SPI_CTL0_SPIEN;

	SPI_CTL0(cfg->reg) |= SPI_MASTER;
	SPI_CTL0(cfg->reg) &= ~SPI_TRANSMODE_BDTRANSMIT;

	if (SPI_WORD_SIZE_GET(config->operation) == 8) {
		SPI_CTL0(cfg->reg) |= SPI_FRAMESIZE_8BIT;
	} else {
		SPI_CTL0(cfg->reg) |= SPI_FRAMESIZE_16BIT;
	}

	/* Reset to hardware NSS mode. */
	SPI_CTL0(cfg->reg) &= ~SPI_CTL0_SWNSSEN;
	if (spi_cs_is_gpio(config)) {
		SPI_CTL0(cfg->reg) |= SPI_CTL0_SWNSSEN;
	} else {
		/*
		 * In a single-master environment, hardware NSS mode also
		 * requires setting the NSSDRV bit.
		 */
		SPI_CTL1(cfg->reg) |= SPI_CTL1_NSSDRV;
	}

	SPI_CTL0(cfg->reg) &= ~SPI_CTL0_LF;
	if (config->operation & SPI_TRANSFER_LSB) {
		SPI_CTL0(cfg->reg) |= SPI_CTL0_LF;
	}

	SPI_CTL0(cfg->reg) &= ~SPI_CTL0_CKPL;
	if (config->operation & SPI_MODE_CPOL) {
		SPI_CTL0(cfg->reg) |= SPI_CTL0_CKPL;
	}

	SPI_CTL0(cfg->reg) &= ~SPI_CTL0_CKPH;
	if (config->operation & SPI_MODE_CPHA) {
		SPI_CTL0(cfg->reg) |= SPI_CTL0_CKPH;
	}

	(void)clock_control_get_rate(GD32_CLOCK_CONTROLLER,
				     (clock_control_subsys_t)&cfg->clkid,
				     &bus_freq);

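	/*
	 * Select the smallest prescaler whose divided clock does not exceed
	 * the requested frequency: prescaler index i divides the bus clock
	 * by 2^(i + 1), i.e. /2 up to /256.
	 */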
	for (uint8_t i = 0U; i <= GD32_SPI_PSC_MAX; i++) {
		bus_freq = bus_freq >> 1U;
		if (bus_freq <= config->frequency) {
			SPI_CTL0(cfg->reg) &= ~SPI_CTL0_PSC;
			SPI_CTL0(cfg->reg) |= CTL0_PSC(i);
			break;
		}
	}

	data->ctx.config = config;

	return 0;
}

static int spi_gd32_frame_exchange(const struct device *dev)
{
	struct spi_gd32_data *data = dev->data;
	const struct spi_gd32_config *cfg = dev->config;
	struct spi_context *ctx = &data->ctx;
	uint16_t tx_frame = 0U, rx_frame = 0U;

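	/* Wait for the transmit buffer to be empty before loading a frame. */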
	while ((SPI_STAT(cfg->reg) & SPI_STAT_TBE) == 0) {
		/* NOP */
	}

	if (SPI_WORD_SIZE_GET(ctx->config->operation) == 8) {
		if (spi_context_tx_buf_on(ctx)) {
			tx_frame = UNALIGNED_GET((uint8_t *)(data->ctx.tx_buf));
		}
		/* In 8-bit mode, the peripheral forces SPI_DATA[15:8] to 0. */
		SPI_DATA(cfg->reg) = tx_frame;

		spi_context_update_tx(ctx, 1, 1);
	} else {
		if (spi_context_tx_buf_on(ctx)) {
			tx_frame = UNALIGNED_GET((uint16_t *)(data->ctx.tx_buf));
		}
		SPI_DATA(cfg->reg) = tx_frame;

		spi_context_update_tx(ctx, 2, 1);
	}

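	/* Wait until the received frame is available in the data register. */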
	while ((SPI_STAT(cfg->reg) & SPI_STAT_RBNE) == 0) {
		/* NOP */
	}

	if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) {
		/* In 8-bit mode, the peripheral forces SPI_DATA[15:8] to 0. */
		rx_frame = SPI_DATA(cfg->reg);
		if (spi_context_rx_buf_on(ctx)) {
			UNALIGNED_PUT(rx_frame, (uint8_t *)data->ctx.rx_buf);
		}

		spi_context_update_rx(ctx, 1, 1);
	} else {
		rx_frame = SPI_DATA(cfg->reg);
		if (spi_context_rx_buf_on(ctx)) {
			UNALIGNED_PUT(rx_frame, (uint16_t *)data->ctx.rx_buf);
		}

		spi_context_update_rx(ctx, 2, 1);
	}

	return spi_gd32_get_err(cfg);
}

#ifdef CONFIG_SPI_GD32_DMA
static void spi_gd32_dma_callback(const struct device *dma_dev, void *arg,
				  uint32_t channel, int status);

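/*
 * Configure and start one DMA channel for the current chunk: the TX
 * channel copies memory to the SPI data register, the RX channel copies
 * the SPI data register to memory.
 */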
static uint32_t spi_gd32_dma_setup(const struct device *dev, const uint32_t dir)
{
	const struct spi_gd32_config *cfg = dev->config;
	struct spi_gd32_data *data = dev->data;
	struct dma_config *dma_cfg = &data->dma[dir].config;
	struct dma_block_config *block_cfg = &data->dma[dir].block;
	const struct spi_gd32_dma_config *dma = &cfg->dma[dir];
	int ret;

	memset(dma_cfg, 0, sizeof(struct dma_config));
	memset(block_cfg, 0, sizeof(struct dma_block_config));

	dma_cfg->source_burst_length = 1;
	dma_cfg->dest_burst_length = 1;
	dma_cfg->user_data = (void *)dev;
	dma_cfg->dma_callback = spi_gd32_dma_callback;
	dma_cfg->block_count = 1U;
	dma_cfg->head_block = block_cfg;
	dma_cfg->dma_slot = cfg->dma[dir].slot;
	dma_cfg->channel_priority =
		GD32_DMA_CONFIG_PRIORITY(cfg->dma[dir].config);
	dma_cfg->channel_direction =
		dir == TX ? MEMORY_TO_PERIPHERAL : PERIPHERAL_TO_MEMORY;

	if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) {
		dma_cfg->source_data_size = 1;
		dma_cfg->dest_data_size = 1;
	} else {
		dma_cfg->source_data_size = 2;
		dma_cfg->dest_data_size = 2;
	}

	block_cfg->block_size = spi_context_max_continuous_chunk(&data->ctx);

	if (dir == TX) {
		block_cfg->dest_address = (uint32_t)&SPI_DATA(cfg->reg);
		block_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		if (spi_context_tx_buf_on(&data->ctx)) {
			block_cfg->source_address = (uint32_t)data->ctx.tx_buf;
			block_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		} else {
			block_cfg->source_address = (uint32_t)&dummy_tx;
			block_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		}
	}

	if (dir == RX) {
		block_cfg->source_address = (uint32_t)&SPI_DATA(cfg->reg);
		block_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;

		if (spi_context_rx_buf_on(&data->ctx)) {
			block_cfg->dest_address = (uint32_t)data->ctx.rx_buf;
			block_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		} else {
			block_cfg->dest_address = (uint32_t)&dummy_rx;
			block_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		}
	}

	ret = dma_config(dma->dev, dma->channel, dma_cfg);
	if (ret < 0) {
		LOG_ERR("dma_config %p failed %d\n", dma->dev, ret);
		return ret;
	}

	ret = dma_start(dma->dev, dma->channel);
	if (ret < 0) {
		LOG_ERR("dma_start %p failed %d\n", dma->dev, ret);
		return ret;
	}

	return 0;
}

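/*
 * (Re)arm every idle DMA channel whose transferred count does not yet
 * match the current chunk length, then enable the SPI transmit and
 * receive DMA requests.
 */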
static int spi_gd32_start_dma_transceive(const struct device *dev)
{
	const struct spi_gd32_config *cfg = dev->config;
	struct spi_gd32_data *data = dev->data;
	const size_t chunk_len = spi_context_max_continuous_chunk(&data->ctx);
	struct dma_status stat;
	int ret = 0;

	for (size_t i = 0; i < spi_gd32_dma_enabled_num(dev); i++) {
		dma_get_status(cfg->dma[i].dev, cfg->dma[i].channel, &stat);
		if ((chunk_len != data->dma[i].count) && !stat.busy) {
			ret = spi_gd32_dma_setup(dev, i);
			if (ret < 0) {
				goto on_error;
			}
		}
	}

	SPI_CTL1(cfg->reg) |= (SPI_CTL1_DMATEN | SPI_CTL1_DMAREN);

on_error:
	if (ret < 0) {
		for (size_t i = 0; i < spi_gd32_dma_enabled_num(dev); i++) {
			dma_stop(cfg->dma[i].dev, cfg->dma[i].channel);
		}
	}
	return ret;
}
#endif

static int spi_gd32_transceive_impl(const struct device *dev,
				    const struct spi_config *config,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs,
				    spi_callback_t cb,
				    void *userdata)
{
	struct spi_gd32_data *data = dev->data;
	const struct spi_gd32_config *cfg = dev->config;
	int ret;

	spi_context_lock(&data->ctx, (cb != NULL), cb, userdata, config);

	ret = spi_gd32_configure(dev, config);
	if (ret < 0) {
		goto error;
	}

	SPI_CTL0(cfg->reg) |= SPI_CTL0_SPIEN;

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	spi_context_cs_control(&data->ctx, true);

#ifdef CONFIG_SPI_GD32_INTERRUPT
#ifdef CONFIG_SPI_GD32_DMA
	if (spi_gd32_dma_enabled(dev)) {
		for (size_t i = 0; i < ARRAY_SIZE(data->dma); i++) {
			data->dma[i].count = 0;
		}

		ret = spi_gd32_start_dma_transceive(dev);
		if (ret < 0) {
			goto dma_error;
		}
	} else
#endif
	{
		SPI_STAT(cfg->reg) &=
			~(SPI_STAT_RBNE | SPI_STAT_TBE | SPI_GD32_ERR_MASK);
		SPI_CTL1(cfg->reg) |=
			(SPI_CTL1_RBNEIE | SPI_CTL1_TBEIE | SPI_CTL1_ERRIE);
	}
	ret = spi_context_wait_for_completion(&data->ctx);
#else
	do {
		ret = spi_gd32_frame_exchange(dev);
		if (ret < 0) {
			break;
		}
	} while (spi_gd32_transfer_ongoing(data));

#ifdef CONFIG_SPI_ASYNC
	spi_context_complete(&data->ctx, dev, ret);
#endif
#endif

	while (!(SPI_STAT(cfg->reg) & SPI_STAT_TBE) ||
		(SPI_STAT(cfg->reg) & SPI_STAT_TRANS)) {
		/* Wait until the last frame transfer completes. */
	}

#ifdef CONFIG_SPI_GD32_DMA
dma_error:
	SPI_CTL1(cfg->reg) &=
		~(SPI_CTL1_DMATEN | SPI_CTL1_DMAREN);
#endif
	spi_context_cs_control(&data->ctx, false);

	SPI_CTL0(cfg->reg) &=
		~(SPI_CTL0_SPIEN);

error:
	spi_context_release(&data->ctx, ret);

	return ret;
}

static int spi_gd32_transceive(const struct device *dev,
			       const struct spi_config *config,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	return spi_gd32_transceive_impl(dev, config, tx_bufs, rx_bufs, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_gd32_transceive_async(const struct device *dev,
				     const struct spi_config *config,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
	return spi_gd32_transceive_impl(dev, config, tx_bufs, rx_bufs, cb, userdata);
}
#endif

#ifdef CONFIG_SPI_GD32_INTERRUPT

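/*
 * Disable the SPI interrupt sources, stop any active DMA channels and
 * signal completion of the transfer to the waiting context.
 */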
static void spi_gd32_complete(const struct device *dev, int status)
{
	struct spi_gd32_data *data = dev->data;
	const struct spi_gd32_config *cfg = dev->config;

	SPI_CTL1(cfg->reg) &=
		~(SPI_CTL1_RBNEIE | SPI_CTL1_TBEIE | SPI_CTL1_ERRIE);

#ifdef CONFIG_SPI_GD32_DMA
	for (size_t i = 0; i < spi_gd32_dma_enabled_num(dev); i++) {
		dma_stop(cfg->dma[i].dev, cfg->dma[i].channel);
	}
#endif

	spi_context_complete(&data->ctx, dev, status);
}

static void spi_gd32_isr(struct device *dev)
{
	const struct spi_gd32_config *cfg = dev->config;
	struct spi_gd32_data *data = dev->data;
	int err = 0;

	err = spi_gd32_get_err(cfg);
	if (err) {
		spi_gd32_complete(dev, err);
		return;
	}

	if (spi_gd32_transfer_ongoing(data)) {
		err = spi_gd32_frame_exchange(dev);
	}

	if (err || !spi_gd32_transfer_ongoing(data)) {
		spi_gd32_complete(dev, err);
	}
}

#endif /* SPI_GD32_INTERRUPT */

#ifdef CONFIG_SPI_GD32_DMA

static bool spi_gd32_chunk_transfer_finished(const struct device *dev)
{
	struct spi_gd32_data *data = dev->data;
	struct spi_gd32_dma_data *dma = data->dma;
	const size_t chunk_len = spi_context_max_continuous_chunk(&data->ctx);

	return (MIN(dma[TX].count, dma[RX].count) >= chunk_len);
}

static void spi_gd32_dma_callback(const struct device *dma_dev, void *arg,
				  uint32_t channel, int status)
{
	const struct device *dev = (const struct device *)arg;
	const struct spi_gd32_config *cfg = dev->config;
	struct spi_gd32_data *data = dev->data;
	const size_t chunk_len = spi_context_max_continuous_chunk(&data->ctx);
	int err = 0;

	if (status < 0) {
		LOG_ERR("dma:%p ch:%d callback gets error: %d", dma_dev, channel,
			status);
		spi_gd32_complete(dev, status);
		return;
	}

	for (size_t i = 0; i < ARRAY_SIZE(cfg->dma); i++) {
		if (dma_dev == cfg->dma[i].dev &&
		    channel == cfg->dma[i].channel) {
			data->dma[i].count += chunk_len;
		}
	}

	/* Check whether the chunk transfer has finished.
	 * The chunk is complete once both dma[TX].count and dma[RX].count
	 * have reached chunk_len. A chunk_len of zero means the transfer
	 * is already complete.
	 */
	if (spi_gd32_chunk_transfer_finished(dev)) {
		if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) {
			spi_context_update_tx(&data->ctx, 1, chunk_len);
			spi_context_update_rx(&data->ctx, 1, chunk_len);
		} else {
			spi_context_update_tx(&data->ctx, 2, chunk_len);
			spi_context_update_rx(&data->ctx, 2, chunk_len);
		}

		if (spi_gd32_transfer_ongoing(data)) {
			/* Next chunk is available, reset the count and
			 * continue processing
			 */
			data->dma[TX].count = 0;
			data->dma[RX].count = 0;
		} else {
			/* All data has been processed; complete the transfer. */
			spi_context_complete(&data->ctx, dev, 0);
			return;
		}
	}

	err = spi_gd32_start_dma_transceive(dev);
	if (err) {
		spi_gd32_complete(dev, err);
	}
}

#endif /* DMA */

static int spi_gd32_release(const struct device *dev,
			    const struct spi_config *config)
{
	struct spi_gd32_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

static DEVICE_API(spi, spi_gd32_driver_api) = {
	.transceive = spi_gd32_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_gd32_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = spi_gd32_release
};

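/*
 * Driver init: enable the peripheral clock, pulse the reset line, apply
 * the default pinctrl state, reserve the RX/TX DMA channels when DMA is
 * configured, set up chip-select GPIOs and hook up the IRQ.
 */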
int spi_gd32_init(const struct device *dev)
{
	struct spi_gd32_data *data = dev->data;
	const struct spi_gd32_config *cfg = dev->config;
	int ret;
#ifdef CONFIG_SPI_GD32_DMA
	uint32_t ch_filter;
#endif

	(void)clock_control_on(GD32_CLOCK_CONTROLLER,
			       (clock_control_subsys_t)&cfg->clkid);

	(void)reset_line_toggle_dt(&cfg->reset);

	ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret) {
		LOG_ERR("Failed to apply pinctrl state");
		return ret;
	}

#ifdef CONFIG_SPI_GD32_DMA
	if ((cfg->dma[RX].dev && !cfg->dma[TX].dev) ||
	    (cfg->dma[TX].dev && !cfg->dma[RX].dev)) {
		LOG_ERR("DMA must be enabled for both TX and RX channels");
		return -ENODEV;
	}

	for (size_t i = 0; i < spi_gd32_dma_enabled_num(dev); i++) {
		if (!device_is_ready(cfg->dma[i].dev)) {
			LOG_ERR("DMA %s not ready", cfg->dma[i].dev->name);
			return -ENODEV;
		}

		ch_filter = BIT(cfg->dma[i].channel);
		ret = dma_request_channel(cfg->dma[i].dev, &ch_filter);
		if (ret < 0) {
			LOG_ERR("dma_request_channel failed %d", ret);
			return ret;
		}
	}
#endif

	ret = spi_context_cs_configure_all(&data->ctx);
	if (ret < 0) {
		return ret;
	}

#ifdef CONFIG_SPI_GD32_INTERRUPT
	cfg->irq_configure(dev);
#endif

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

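/*
 * Devicetree helpers: DMA_INITIALIZER() pulls the controller, channel and
 * config cells for one direction from the instance's "dmas" property; the
 * slot and fifo-threshold cells are only consumed when a gd_gd32_dma_v1
 * compatible controller is enabled.
 */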
#define DMA_INITIALIZER(idx, dir)                                              \
	{                                                                      \
		.dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(idx, dir)),     \
		.channel = DT_INST_DMAS_CELL_BY_NAME(idx, dir, channel),       \
		.slot = COND_CODE_1(                                           \
			DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1),             \
			(DT_INST_DMAS_CELL_BY_NAME(idx, dir, slot)), (0)),     \
		.config = DT_INST_DMAS_CELL_BY_NAME(idx, dir, config),         \
		.fifo_threshold = COND_CODE_1(                                 \
			DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1),             \
			(DT_INST_DMAS_CELL_BY_NAME(idx, dir, fifo_threshold)), \
			(0)),                                                  \
	}

#define DMAS_DECL(idx)                                                         \
	{                                                                      \
		COND_CODE_1(DT_INST_DMAS_HAS_NAME(idx, rx),                    \
			    (DMA_INITIALIZER(idx, rx)), ({0})),                \
		COND_CODE_1(DT_INST_DMAS_HAS_NAME(idx, tx),                    \
			    (DMA_INITIALIZER(idx, tx)), ({0})),                \
	}

#define GD32_IRQ_CONFIGURE(idx)						   \
	static void spi_gd32_irq_configure_##idx(void)			   \
	{								   \
		IRQ_CONNECT(DT_INST_IRQN(idx), DT_INST_IRQ(idx, priority), \
			    spi_gd32_isr,				   \
			    DEVICE_DT_INST_GET(idx), 0);		   \
		irq_enable(DT_INST_IRQN(idx));				   \
	}

#define GD32_SPI_INIT(idx)						       \
	PINCTRL_DT_INST_DEFINE(idx);					       \
	IF_ENABLED(CONFIG_SPI_GD32_INTERRUPT, (GD32_IRQ_CONFIGURE(idx)));      \
	static struct spi_gd32_data spi_gd32_data_##idx = {		       \
		SPI_CONTEXT_INIT_LOCK(spi_gd32_data_##idx, ctx),	       \
		SPI_CONTEXT_INIT_SYNC(spi_gd32_data_##idx, ctx),	       \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(idx), ctx) };      \
	static struct spi_gd32_config spi_gd32_config_##idx = {		       \
		.reg = DT_INST_REG_ADDR(idx),				       \
		.clkid = DT_INST_CLOCKS_CELL(idx, id),			       \
		.reset = RESET_DT_SPEC_INST_GET(idx),			       \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(idx),		       \
		IF_ENABLED(CONFIG_SPI_GD32_DMA, (.dma = DMAS_DECL(idx),))      \
		IF_ENABLED(CONFIG_SPI_GD32_INTERRUPT,			       \
			   (.irq_configure = spi_gd32_irq_configure_##idx)) }; \
	SPI_DEVICE_DT_INST_DEFINE(idx, spi_gd32_init, NULL,		       \
			      &spi_gd32_data_##idx, &spi_gd32_config_##idx,    \
			      POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,	       \
			      &spi_gd32_driver_api);

DT_INST_FOREACH_STATUS_OKAY(GD32_SPI_INIT)
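
/*
 * Illustrative usage sketch (not part of the driver): a minimal transfer
 * through the generic Zephyr SPI API, assuming a hypothetical devicetree
 * node labelled "spi0" that enables this controller. Frequency and word
 * size are placeholder values.
 *
 *	const struct device *spi_dev = DEVICE_DT_GET(DT_NODELABEL(spi0));
 *	struct spi_config spi_cfg = {
 *		.frequency = 1000000U,
 *		.operation = SPI_OP_MODE_MASTER | SPI_WORD_SET(8),
 *	};
 *	uint8_t tx_data[2] = { 0x9f, 0x00 };
 *	uint8_t rx_data[2];
 *	struct spi_buf tx_buf = { .buf = tx_data, .len = sizeof(tx_data) };
 *	struct spi_buf rx_buf = { .buf = rx_data, .len = sizeof(rx_data) };
 *	struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };
 *	struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 };
 *
 *	int err = spi_transceive(spi_dev, &spi_cfg, &tx, &rx);
 */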