/*
 * Copyright (c) 2021 BrainCo Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT gd_gd32_spi

#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/gd32.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/reset.h>
#include <zephyr/drivers/spi.h>
#ifdef CONFIG_SPI_GD32_DMA
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/dma/dma_gd32.h>
#endif

#include <gd32_spi.h>

#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(spi_gd32);

#include "spi_context.h"

/* SPI error status mask. */
#define SPI_GD32_ERR_MASK	(SPI_STAT_RXORERR | SPI_STAT_CONFERR | SPI_STAT_CRCERR)

#define GD32_SPI_PSC_MAX	0x7U

#ifdef CONFIG_SPI_GD32_DMA

enum spi_gd32_dma_direction {
	RX = 0,
	TX,
	NUM_OF_DIRECTION
};

struct spi_gd32_dma_config {
	const struct device *dev;
	uint32_t channel;
	uint32_t config;
	uint32_t slot;
	uint32_t fifo_threshold;
};

struct spi_gd32_dma_data {
	struct dma_config config;
	struct dma_block_config block;
	uint32_t count;
};

#endif

struct spi_gd32_config {
	uint32_t reg;
	uint16_t clkid;
	struct reset_dt_spec reset;
	const struct pinctrl_dev_config *pcfg;
#ifdef CONFIG_SPI_GD32_DMA
	const struct spi_gd32_dma_config dma[NUM_OF_DIRECTION];
#endif
#ifdef CONFIG_SPI_GD32_INTERRUPT
	void (*irq_configure)();
#endif
};

struct spi_gd32_data {
	struct spi_context ctx;
#ifdef CONFIG_SPI_GD32_DMA
	struct spi_gd32_dma_data dma[NUM_OF_DIRECTION];
#endif
};

#ifdef CONFIG_SPI_GD32_DMA

static uint32_t dummy_tx;
static uint32_t dummy_rx;

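/*
 * DMA transfers are used only when both a TX and an RX DMA channel are
 * configured for this instance; otherwise the driver falls back to
 * interrupt- or polling-driven frame exchange.
 */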
static bool spi_gd32_dma_enabled(const struct device *dev)
{
	const struct spi_gd32_config *cfg = dev->config;

	if (cfg->dma[TX].dev && cfg->dma[RX].dev) {
		return true;
	}

	return false;
}

static size_t spi_gd32_dma_enabled_num(const struct device *dev)
{
	return spi_gd32_dma_enabled(dev) ? 2 : 0;
}

#endif

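/* Return -EIO if any error flag (RX overrun, configuration, CRC) is set in SPI_STAT. */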
static int spi_gd32_get_err(const struct spi_gd32_config *cfg)
{
	uint32_t stat = SPI_STAT(cfg->reg);

	if (stat & SPI_GD32_ERR_MASK) {
		LOG_ERR("spi%u error status detected, err = %u",
			cfg->reg, stat & (uint32_t)SPI_GD32_ERR_MASK);

		return -EIO;
	}

	return 0;
}

static bool spi_gd32_transfer_ongoing(struct spi_gd32_data *data)
{
	return spi_context_tx_on(&data->ctx) ||
	       spi_context_rx_on(&data->ctx);
}

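/*
 * Apply the requested spi_config to the peripheral: master mode, frame
 * size, NSS handling, bit order, clock polarity/phase, and the smallest
 * prescaler whose resulting SCK frequency does not exceed the requested
 * frequency.
 */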
static int spi_gd32_configure(const struct device *dev,
			      const struct spi_config *config)
{
	struct spi_gd32_data *data = dev->data;
	const struct spi_gd32_config *cfg = dev->config;
	uint32_t bus_freq;

	if (spi_context_configured(&data->ctx, config)) {
		return 0;
	}

	if (SPI_OP_MODE_GET(config->operation) == SPI_OP_MODE_SLAVE) {
		LOG_ERR("Slave mode not supported");
		return -ENOTSUP;
	}

	SPI_CTL0(cfg->reg) &= ~SPI_CTL0_SPIEN;

	SPI_CTL0(cfg->reg) |= SPI_MASTER;
	SPI_CTL0(cfg->reg) &= ~SPI_TRANSMODE_BDTRANSMIT;

	if (SPI_WORD_SIZE_GET(config->operation) == 8) {
		SPI_CTL0(cfg->reg) |= SPI_FRAMESIZE_8BIT;
	} else {
		SPI_CTL0(cfg->reg) |= SPI_FRAMESIZE_16BIT;
	}

	/* Reset to hardware NSS mode. */
	SPI_CTL0(cfg->reg) &= ~SPI_CTL0_SWNSSEN;
	if (spi_cs_is_gpio(config)) {
		SPI_CTL0(cfg->reg) |= SPI_CTL0_SWNSSEN;
	} else {
		/*
		 * In a single-master environment, hardware NSS mode also
		 * requires the NSSDRV bit to be set.
		 */
		SPI_CTL1(cfg->reg) |= SPI_CTL1_NSSDRV;
	}

	SPI_CTL0(cfg->reg) &= ~SPI_CTL0_LF;
	if (config->operation & SPI_TRANSFER_LSB) {
		SPI_CTL0(cfg->reg) |= SPI_CTL0_LF;
	}

	SPI_CTL0(cfg->reg) &= ~SPI_CTL0_CKPL;
	if (config->operation & SPI_MODE_CPOL) {
		SPI_CTL0(cfg->reg) |= SPI_CTL0_CKPL;
	}

	SPI_CTL0(cfg->reg) &= ~SPI_CTL0_CKPH;
	if (config->operation & SPI_MODE_CPHA) {
		SPI_CTL0(cfg->reg) |= SPI_CTL0_CKPH;
	}

	(void)clock_control_get_rate(GD32_CLOCK_CONTROLLER,
				     (clock_control_subsys_t)&cfg->clkid,
				     &bus_freq);

	for (uint8_t i = 0U; i <= GD32_SPI_PSC_MAX; i++) {
		bus_freq = bus_freq >> 1U;
		if (bus_freq <= config->frequency) {
			SPI_CTL0(cfg->reg) &= ~SPI_CTL0_PSC;
			SPI_CTL0(cfg->reg) |= CTL0_PSC(i);
			break;
		}
	}

	data->ctx.config = config;

	return 0;
}

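/*
 * Exchange a single frame by polling: wait for TBE, write one frame
 * (8- or 16-bit), then wait for RBNE and read the received frame back.
 */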
static int spi_gd32_frame_exchange(const struct device *dev)
{
	struct spi_gd32_data *data = dev->data;
	const struct spi_gd32_config *cfg = dev->config;
	struct spi_context *ctx = &data->ctx;
	uint16_t tx_frame = 0U, rx_frame = 0U;

	while ((SPI_STAT(cfg->reg) & SPI_STAT_TBE) == 0) {
		/* NOP */
	}

	if (SPI_WORD_SIZE_GET(ctx->config->operation) == 8) {
		if (spi_context_tx_buf_on(ctx)) {
			tx_frame = UNALIGNED_GET((uint8_t *)(data->ctx.tx_buf));
		}
		/* In 8-bit mode, the peripheral forces SPI_DATA[15:8] to 0. */
		SPI_DATA(cfg->reg) = tx_frame;

		spi_context_update_tx(ctx, 1, 1);
	} else {
		if (spi_context_tx_buf_on(ctx)) {
			tx_frame = UNALIGNED_GET((uint16_t *)(data->ctx.tx_buf));
		}
		SPI_DATA(cfg->reg) = tx_frame;

		spi_context_update_tx(ctx, 2, 1);
	}

	while ((SPI_STAT(cfg->reg) & SPI_STAT_RBNE) == 0) {
		/* NOP */
	}

	if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) {
		/* In 8-bit mode, the peripheral forces SPI_DATA[15:8] to 0. */
		rx_frame = SPI_DATA(cfg->reg);
		if (spi_context_rx_buf_on(ctx)) {
			UNALIGNED_PUT(rx_frame, (uint8_t *)data->ctx.rx_buf);
		}

		spi_context_update_rx(ctx, 1, 1);
	} else {
		rx_frame = SPI_DATA(cfg->reg);
		if (spi_context_rx_buf_on(ctx)) {
			UNALIGNED_PUT(rx_frame, (uint16_t *)data->ctx.rx_buf);
		}

		spi_context_update_rx(ctx, 2, 1);
	}

	return spi_gd32_get_err(cfg);
}

#ifdef CONFIG_SPI_GD32_DMA
static void spi_gd32_dma_callback(const struct device *dma_dev, void *arg,
				  uint32_t channel, int status);

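/*
 * Configure and start one DMA channel (TX or RX) for the current chunk.
 * When no buffer is supplied for a direction, a static dummy word with a
 * non-incrementing address is used instead.
 */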
static int spi_gd32_dma_setup(const struct device *dev, const uint32_t dir)
{
	const struct spi_gd32_config *cfg = dev->config;
	struct spi_gd32_data *data = dev->data;
	struct dma_config *dma_cfg = &data->dma[dir].config;
	struct dma_block_config *block_cfg = &data->dma[dir].block;
	const struct spi_gd32_dma_config *dma = &cfg->dma[dir];
	int ret;

	memset(dma_cfg, 0, sizeof(struct dma_config));
	memset(block_cfg, 0, sizeof(struct dma_block_config));

	dma_cfg->source_burst_length = 1;
	dma_cfg->dest_burst_length = 1;
	dma_cfg->user_data = (void *)dev;
	dma_cfg->dma_callback = spi_gd32_dma_callback;
	dma_cfg->block_count = 1U;
	dma_cfg->head_block = block_cfg;
	dma_cfg->dma_slot = cfg->dma[dir].slot;
	dma_cfg->channel_priority =
		GD32_DMA_CONFIG_PRIORITY(cfg->dma[dir].config);
	dma_cfg->channel_direction =
		dir == TX ? MEMORY_TO_PERIPHERAL : PERIPHERAL_TO_MEMORY;

	if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) {
		dma_cfg->source_data_size = 1;
		dma_cfg->dest_data_size = 1;
	} else {
		dma_cfg->source_data_size = 2;
		dma_cfg->dest_data_size = 2;
	}

	block_cfg->block_size = spi_context_max_continuous_chunk(&data->ctx);

	if (dir == TX) {
		block_cfg->dest_address = (uint32_t)&SPI_DATA(cfg->reg);
		block_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		if (spi_context_tx_buf_on(&data->ctx)) {
			block_cfg->source_address = (uint32_t)data->ctx.tx_buf;
			block_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		} else {
			block_cfg->source_address = (uint32_t)&dummy_tx;
			block_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		}
	}

	if (dir == RX) {
		block_cfg->source_address = (uint32_t)&SPI_DATA(cfg->reg);
		block_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;

		if (spi_context_rx_buf_on(&data->ctx)) {
			block_cfg->dest_address = (uint32_t)data->ctx.rx_buf;
			block_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		} else {
			block_cfg->dest_address = (uint32_t)&dummy_rx;
			block_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		}
	}

	ret = dma_config(dma->dev, dma->channel, dma_cfg);
	if (ret < 0) {
		LOG_ERR("dma_config %p failed %d", dma->dev, ret);
		return ret;
	}

	ret = dma_start(dma->dev, dma->channel);
	if (ret < 0) {
		LOG_ERR("dma_start %p failed %d", dma->dev, ret);
		return ret;
	}

	return 0;
}

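/*
 * (Re)arm any idle DMA channel that has not yet completed the current
 * chunk, then enable the peripheral's DMA TX and RX requests.
 */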
static int spi_gd32_start_dma_transceive(const struct device *dev)
{
	const struct spi_gd32_config *cfg = dev->config;
	struct spi_gd32_data *data = dev->data;
	const size_t chunk_len = spi_context_max_continuous_chunk(&data->ctx);
	struct dma_status stat;
	int ret = 0;

	for (size_t i = 0; i < spi_gd32_dma_enabled_num(dev); i++) {
		dma_get_status(cfg->dma[i].dev, cfg->dma[i].channel, &stat);
		if ((chunk_len != data->dma[i].count) && !stat.busy) {
			ret = spi_gd32_dma_setup(dev, i);
			if (ret < 0) {
				goto on_error;
			}
		}
	}

	SPI_CTL1(cfg->reg) |= (SPI_CTL1_DMATEN | SPI_CTL1_DMAREN);

on_error:
	if (ret < 0) {
		for (size_t i = 0; i < spi_gd32_dma_enabled_num(dev); i++) {
			dma_stop(cfg->dma[i].dev, cfg->dma[i].channel);
		}
	}
	return ret;
}
#endif

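/*
 * Common transceive path shared by the sync and async entry points:
 * lock the context, configure and enable the peripheral, assert CS,
 * run the transfer via DMA, interrupts or polling, then release CS and
 * disable the peripheral before unlocking.
 */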
static int spi_gd32_transceive_impl(const struct device *dev,
				    const struct spi_config *config,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs,
				    spi_callback_t cb,
				    void *userdata)
{
	struct spi_gd32_data *data = dev->data;
	const struct spi_gd32_config *cfg = dev->config;
	int ret;

	spi_context_lock(&data->ctx, (cb != NULL), cb, userdata, config);

	ret = spi_gd32_configure(dev, config);
	if (ret < 0) {
		goto error;
	}

	SPI_CTL0(cfg->reg) |= SPI_CTL0_SPIEN;

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	spi_context_cs_control(&data->ctx, true);

#ifdef CONFIG_SPI_GD32_INTERRUPT
#ifdef CONFIG_SPI_GD32_DMA
	if (spi_gd32_dma_enabled(dev)) {
		for (size_t i = 0; i < ARRAY_SIZE(data->dma); i++) {
			data->dma[i].count = 0;
		}

		ret = spi_gd32_start_dma_transceive(dev);
		if (ret < 0) {
			goto dma_error;
		}
	} else
#endif
	{
		SPI_STAT(cfg->reg) &=
			~(SPI_STAT_RBNE | SPI_STAT_TBE | SPI_GD32_ERR_MASK);
		SPI_CTL1(cfg->reg) |=
			(SPI_CTL1_RBNEIE | SPI_CTL1_TBEIE | SPI_CTL1_ERRIE);
	}
	ret = spi_context_wait_for_completion(&data->ctx);
#else
	do {
		ret = spi_gd32_frame_exchange(dev);
		if (ret < 0) {
			break;
		}
	} while (spi_gd32_transfer_ongoing(data));

#ifdef CONFIG_SPI_ASYNC
	spi_context_complete(&data->ctx, dev, ret);
#endif
#endif

	while (!(SPI_STAT(cfg->reg) & SPI_STAT_TBE) ||
		(SPI_STAT(cfg->reg) & SPI_STAT_TRANS)) {
		/* Wait until the last frame transfer completes. */
	}

#ifdef CONFIG_SPI_GD32_DMA
dma_error:
#endif
	spi_context_cs_control(&data->ctx, false);

	SPI_CTL0(cfg->reg) &= ~SPI_CTL0_SPIEN;
	SPI_CTL1(cfg->reg) &= ~(SPI_CTL1_DMATEN | SPI_CTL1_DMAREN);

error:
	spi_context_release(&data->ctx, ret);

	return ret;
}

static int spi_gd32_transceive(const struct device *dev,
			       const struct spi_config *config,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	return spi_gd32_transceive_impl(dev, config, tx_bufs, rx_bufs, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_gd32_transceive_async(const struct device *dev,
				     const struct spi_config *config,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
	return spi_gd32_transceive_impl(dev, config, tx_bufs, rx_bufs, cb, userdata);
}
#endif

#ifdef CONFIG_SPI_GD32_INTERRUPT

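/*
 * Finish an interrupt- or DMA-driven transfer: mask the SPI interrupts,
 * stop any active DMA channels and signal completion to the context.
 */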
static void spi_gd32_complete(const struct device *dev, int status)
{
	struct spi_gd32_data *data = dev->data;
	const struct spi_gd32_config *cfg = dev->config;

	SPI_CTL1(cfg->reg) &=
		~(SPI_CTL1_RBNEIE | SPI_CTL1_TBEIE | SPI_CTL1_ERRIE);

#ifdef CONFIG_SPI_GD32_DMA
	for (size_t i = 0; i < spi_gd32_dma_enabled_num(dev); i++) {
		dma_stop(cfg->dma[i].dev, cfg->dma[i].channel);
	}
#endif

	spi_context_complete(&data->ctx, dev, status);
}

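/*
 * Interrupt handler for the non-DMA path: exchange one frame per
 * interrupt and complete the transfer on error or when no data remains.
 */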
static void spi_gd32_isr(struct device *dev)
{
	const struct spi_gd32_config *cfg = dev->config;
	struct spi_gd32_data *data = dev->data;
	int err = 0;

	err = spi_gd32_get_err(cfg);
	if (err) {
		spi_gd32_complete(dev, err);
		return;
	}

	if (spi_gd32_transfer_ongoing(data)) {
		err = spi_gd32_frame_exchange(dev);
	}

	if (err || !spi_gd32_transfer_ongoing(data)) {
		spi_gd32_complete(dev, err);
	}
}

#endif /* CONFIG_SPI_GD32_INTERRUPT */

#ifdef CONFIG_SPI_GD32_DMA

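/*
 * A chunk is finished once both the TX and the RX DMA channels have
 * transferred at least the full chunk length.
 */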
static bool spi_gd32_chunk_transfer_finished(const struct device *dev)
{
	struct spi_gd32_data *data = dev->data;
	struct spi_gd32_dma_data *dma = data->dma;
	const size_t chunk_len = spi_context_max_continuous_chunk(&data->ctx);

	return (MIN(dma[TX].count, dma[RX].count) >= chunk_len);
}

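/*
 * DMA completion callback: account for the finished chunk, advance the
 * spi_context buffers and either start the next chunk or complete the
 * transfer.
 */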
static void spi_gd32_dma_callback(const struct device *dma_dev, void *arg,
				  uint32_t channel, int status)
{
	const struct device *dev = (const struct device *)arg;
	const struct spi_gd32_config *cfg = dev->config;
	struct spi_gd32_data *data = dev->data;
	const size_t chunk_len = spi_context_max_continuous_chunk(&data->ctx);
	int err = 0;

	if (status < 0) {
		LOG_ERR("dma:%p ch:%d callback gets error: %d", dma_dev, channel,
			status);
		spi_gd32_complete(dev, status);
		return;
	}

	for (size_t i = 0; i < ARRAY_SIZE(cfg->dma); i++) {
		if (dma_dev == cfg->dma[i].dev &&
		    channel == cfg->dma[i].channel) {
			data->dma[i].count += chunk_len;
		}
	}

	/* Check whether the transfer is finished.
	 * The transmission of this chunk is complete once both dma[TX].count
	 * and dma[RX].count have reached at least chunk_len.
	 * A chunk_len of zero here means the transfer is already complete.
	 */
	if (spi_gd32_chunk_transfer_finished(dev)) {
		if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) {
			spi_context_update_tx(&data->ctx, 1, chunk_len);
			spi_context_update_rx(&data->ctx, 1, chunk_len);
		} else {
			spi_context_update_tx(&data->ctx, 2, chunk_len);
			spi_context_update_rx(&data->ctx, 2, chunk_len);
		}

		if (spi_gd32_transfer_ongoing(data)) {
			/* The next chunk is available: reset the counts and
			 * continue processing.
			 */
			data->dma[TX].count = 0;
			data->dma[RX].count = 0;
		} else {
			/* All data has been processed; complete the transfer. */
			spi_context_complete(&data->ctx, dev, 0);
			return;
		}
	}

	err = spi_gd32_start_dma_transceive(dev);
	if (err) {
		spi_gd32_complete(dev, err);
	}
}

#endif /* CONFIG_SPI_GD32_DMA */

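/* Release the bus: unconditionally unlock the spi_context. */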
static int spi_gd32_release(const struct device *dev,
			    const struct spi_config *config)
{
	struct spi_gd32_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

static struct spi_driver_api spi_gd32_driver_api = {
	.transceive = spi_gd32_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_gd32_transceive_async,
#endif
	.release = spi_gd32_release
};

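/*
 * Driver init: enable the peripheral clock, toggle the reset line, apply
 * pinctrl, validate and reserve DMA channels, configure CS GPIOs and
 * hook up the IRQ before unlocking the context.
 */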
int spi_gd32_init(const struct device *dev)
{
	struct spi_gd32_data *data = dev->data;
	const struct spi_gd32_config *cfg = dev->config;
	int ret;
#ifdef CONFIG_SPI_GD32_DMA
	uint32_t ch_filter;
#endif

	(void)clock_control_on(GD32_CLOCK_CONTROLLER,
			       (clock_control_subsys_t)&cfg->clkid);

	(void)reset_line_toggle_dt(&cfg->reset);

	ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret) {
		LOG_ERR("Failed to apply pinctrl state");
		return ret;
	}

#ifdef CONFIG_SPI_GD32_DMA
	if ((cfg->dma[RX].dev && !cfg->dma[TX].dev) ||
	    (cfg->dma[TX].dev && !cfg->dma[RX].dev)) {
		LOG_ERR("DMA must be enabled for both TX and RX channels");
		return -ENODEV;
	}

	for (size_t i = 0; i < spi_gd32_dma_enabled_num(dev); i++) {
		if (!device_is_ready(cfg->dma[i].dev)) {
			LOG_ERR("DMA %s not ready", cfg->dma[i].dev->name);
			return -ENODEV;
		}

		ch_filter = BIT(cfg->dma[i].channel);
		ret = dma_request_channel(cfg->dma[i].dev, &ch_filter);
		if (ret < 0) {
			LOG_ERR("dma_request_channel failed %d", ret);
			return ret;
		}
	}
#endif

	ret = spi_context_cs_configure_all(&data->ctx);
	if (ret < 0) {
		return ret;
	}

#ifdef CONFIG_SPI_GD32_INTERRUPT
	cfg->irq_configure(dev);
#endif

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

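/*
 * Per-instance definitions generated from devicetree: optional DMA
 * channel descriptors, the IRQ configuration helper, and the data/config
 * structures passed to DEVICE_DT_INST_DEFINE. DMA channels are taken from
 * the node's "dmas"/"dma-names" properties, e.g. (illustrative only, the
 * cell layout depends on the DMA controller binding in use):
 *
 *     &spi0 {
 *             dmas = <&dma0 2 0>, <&dma0 3 0>;
 *             dma-names = "rx", "tx";
 *     };
 */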
#define DMA_INITIALIZER(idx, dir)                                              \
	{                                                                      \
		.dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(idx, dir)),     \
		.channel = DT_INST_DMAS_CELL_BY_NAME(idx, dir, channel),       \
		.slot = COND_CODE_1(                                           \
			DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1),             \
			(DT_INST_DMAS_CELL_BY_NAME(idx, dir, slot)), (0)),     \
		.config = DT_INST_DMAS_CELL_BY_NAME(idx, dir, config),         \
		.fifo_threshold = COND_CODE_1(                                 \
			DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1),             \
			(DT_INST_DMAS_CELL_BY_NAME(idx, dir, fifo_threshold)), \
			(0)),                                                  \
	}

#define DMAS_DECL(idx)                                                         \
	{                                                                      \
		COND_CODE_1(DT_INST_DMAS_HAS_NAME(idx, rx),                    \
			    (DMA_INITIALIZER(idx, rx)), ({0})),                \
		COND_CODE_1(DT_INST_DMAS_HAS_NAME(idx, tx),                    \
			    (DMA_INITIALIZER(idx, tx)), ({0})),                \
	}

#define GD32_IRQ_CONFIGURE(idx)						   \
	static void spi_gd32_irq_configure_##idx(void)			   \
	{								   \
		IRQ_CONNECT(DT_INST_IRQN(idx), DT_INST_IRQ(idx, priority), \
			    spi_gd32_isr,				   \
			    DEVICE_DT_INST_GET(idx), 0);		   \
		irq_enable(DT_INST_IRQN(idx));				   \
	}

#define GD32_SPI_INIT(idx)						       \
	PINCTRL_DT_INST_DEFINE(idx);					       \
	IF_ENABLED(CONFIG_SPI_GD32_INTERRUPT, (GD32_IRQ_CONFIGURE(idx)));      \
	static struct spi_gd32_data spi_gd32_data_##idx = {		       \
		SPI_CONTEXT_INIT_LOCK(spi_gd32_data_##idx, ctx),	       \
		SPI_CONTEXT_INIT_SYNC(spi_gd32_data_##idx, ctx),	       \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(idx), ctx) };      \
	static struct spi_gd32_config spi_gd32_config_##idx = {		       \
		.reg = DT_INST_REG_ADDR(idx),				       \
		.clkid = DT_INST_CLOCKS_CELL(idx, id),			       \
		.reset = RESET_DT_SPEC_INST_GET(idx),			       \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(idx),		       \
		IF_ENABLED(CONFIG_SPI_GD32_DMA, (.dma = DMAS_DECL(idx),))      \
		IF_ENABLED(CONFIG_SPI_GD32_INTERRUPT,			       \
			   (.irq_configure = spi_gd32_irq_configure_##idx)) }; \
	DEVICE_DT_INST_DEFINE(idx, &spi_gd32_init, NULL,		       \
			      &spi_gd32_data_##idx, &spi_gd32_config_##idx,    \
			      POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,	       \
			      &spi_gd32_driver_api);

DT_INST_FOREACH_STATUS_OKAY(GD32_SPI_INIT)