1 /*
2 * Copyright (c) 2015 Intel Corporation.
3 * Copyright (c) 2023 Synopsys, Inc. All rights reserved.
4 * Copyright (c) 2023 Meta Platforms
5 *
6 * SPDX-License-Identifier: Apache-2.0
7 */
8
9 #define DT_DRV_COMPAT snps_designware_spi
10
11 /* spi_dw.c - Designware SPI driver implementation */
12
13 #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
14 #include <zephyr/logging/log.h>
15 LOG_MODULE_REGISTER(spi_dw);
16
17 #include <errno.h>
18
19 #include <zephyr/kernel.h>
20 #include <zephyr/arch/cpu.h>
21
22 #include <zephyr/device.h>
23 #include <zephyr/init.h>
24 #include <zephyr/pm/device.h>
25
26 #include <zephyr/sys/sys_io.h>
27 #include <zephyr/sys/util.h>
28
29 #ifdef CONFIG_IOAPIC
30 #include <zephyr/drivers/interrupt_controller/ioapic.h>
31 #endif
32
33 #include <zephyr/drivers/spi.h>
34 #include <zephyr/irq.h>
35
36 #include "spi_dw.h"
37 #include "spi_context.h"
38
39 #ifdef CONFIG_PINCTRL
40 #include <zephyr/drivers/pinctrl.h>
41 #endif
42
spi_dw_is_slave(struct spi_dw_data * spi)43 static inline bool spi_dw_is_slave(struct spi_dw_data *spi)
44 {
45 return (IS_ENABLED(CONFIG_SPI_SLAVE) &&
46 spi_context_is_slave(&spi->ctx));
47 }
48
/* Finalize a transfer: on success, return early unless both directions
 * are exhausted; on error (or once done), drain, quiesce the controller
 * and signal completion on the context.
 */
static void completed(const struct device *dev, int error)
{
	struct spi_dw_data *spi = dev->data;
	struct spi_context *ctx = &spi->ctx;

	if (error) {
		goto out;
	}

	/* Data still pending in either direction: transfer not done yet. */
	if (spi_context_tx_on(&spi->ctx) ||
	    spi_context_rx_on(&spi->ctx)) {
		return;
	}

out:
	/* need to give time for FIFOs to drain before issuing more commands */
	while (test_bit_sr_busy(dev)) {
	}

	/* Disabling interrupts */
	write_imr(dev, DW_SPI_IMR_MASK);
	/* Disabling the controller */
	clear_bit_ssienr(dev);

	/* Master only: release the chip-select, via GPIO when configured,
	 * otherwise by clearing the hardware slave-enable register.
	 */
	if (!spi_dw_is_slave(spi)) {
		if (spi_cs_is_gpio(ctx->config)) {
			spi_context_cs_control(ctx, false);
		} else {
			write_ser(dev, 0);
		}
	}

	LOG_DBG("SPI transaction completed %s error",
		error ? "with" : "without");

	spi_context_complete(&spi->ctx, dev, error);
}
86
/* Refill the TX FIFO from the context's tx buffer(s).  The fill amount
 * is rate-limited so that responses to in-flight frames cannot overflow
 * the RX FIFO.
 */
static void push_data(const struct device *dev)
{
	const struct spi_dw_config *info = dev->config;
	struct spi_dw_data *spi = dev->data;
	uint32_t data = 0U;
	uint32_t f_tx;

	/* Free TX slots; while receiving, also subtract the RX FIFO level
	 * so every pushed frame has room for its returned frame.
	 */
	if (spi_context_rx_on(&spi->ctx)) {
		f_tx = info->fifo_depth - read_txflr(dev) -
			read_rxflr(dev);
		if ((int)f_tx < 0) {
			f_tx = 0U; /* if rx-fifo is full, hold off tx */
		}
	} else {
		f_tx = info->fifo_depth - read_txflr(dev);
	}

	while (f_tx) {
		if (spi_context_tx_buf_on(&spi->ctx)) {
			/* Read the next frame at the configured frame size
			 * (dfs = bytes per frame: 1, 2 or 4).
			 */
			switch (spi->dfs) {
			case 1:
				data = UNALIGNED_GET((uint8_t *)
						     (spi->ctx.tx_buf));
				break;
			case 2:
				data = UNALIGNED_GET((uint16_t *)
						     (spi->ctx.tx_buf));
				break;
			case 4:
				data = UNALIGNED_GET((uint32_t *)
						     (spi->ctx.tx_buf));
				break;
			}
		} else if (spi_context_rx_on(&spi->ctx)) {
			/* No need to push more than necessary */
			if ((int)(spi->ctx.rx_len - spi->fifo_diff) <= 0) {
				break;
			}

			/* RX-only: clock out dummy frames. */
			data = 0U;
		} else if (spi_context_tx_on(&spi->ctx)) {
			/* TX length remains but buffer is NULL: send zeros. */
			data = 0U;
		} else {
			/* Nothing to push anymore */
			break;
		}

		write_dr(dev, data);

		spi_context_update_tx(&spi->ctx, spi->dfs, 1);
		/* One more frame written but not yet read back. */
		spi->fifo_diff++;

		f_tx--;
	}

	if (!spi_context_tx_on(&spi->ctx)) {
		/* prevents any further interrupts demanding TX fifo fill */
		write_txftlr(dev, 0);
	}
}
147
/* Drain the RX FIFO into the context's rx buffer(s), then lower the RX
 * threshold near the end of a transfer so the final short burst still
 * raises an interrupt.
 */
static void pull_data(const struct device *dev)
{
	const struct spi_dw_config *info = dev->config;
	struct spi_dw_data *spi = dev->data;

	while (read_rxflr(dev)) {
		uint32_t data = read_dr(dev);

		/* A NULL rx buffer means this frame is discarded. */
		if (spi_context_rx_buf_on(&spi->ctx)) {
			switch (spi->dfs) {
			case 1:
				UNALIGNED_PUT(data, (uint8_t *)spi->ctx.rx_buf);
				break;
			case 2:
				UNALIGNED_PUT(data, (uint16_t *)spi->ctx.rx_buf);
				break;
			case 4:
				UNALIGNED_PUT(data, (uint32_t *)spi->ctx.rx_buf);
				break;
			}
		}

		spi_context_update_rx(&spi->ctx, spi->dfs, 1);
		/* One in-flight frame accounted for. */
		spi->fifo_diff--;
	}

	/* NOTE(review): when rx is exhausted the threshold tracks the
	 * remaining tx frames; with tx_len == 0 this writes (0 - 1) to
	 * RXFTLR — presumably never reached because completed() stops the
	 * transfer first, but confirm.
	 */
	if (!spi->ctx.rx_len && spi->ctx.tx_len < info->fifo_depth) {
		write_rxftlr(dev, spi->ctx.tx_len - 1);
	} else if (read_rxftlr(dev) >= spi->ctx.rx_len) {
		write_rxftlr(dev, spi->ctx.rx_len - 1);
	}
}
180
/* Validate a spi_config against the controller's capabilities and
 * program CTRLR0 (frame size, SPI mode) and, for masters, BAUDR.
 *
 * Returns 0 on success, -ENOTSUP or -EINVAL when the requested
 * operation cannot be supported by this instance.
 */
static int spi_dw_configure(const struct device *dev,
			    struct spi_dw_data *spi,
			    const struct spi_config *config)
{
	const struct spi_dw_config *info = dev->config;
	uint32_t ctrlr0 = 0U;

	LOG_DBG("%p (prev %p)", config, spi->ctx.config);

	if (spi_context_configured(&spi->ctx, config)) {
		/* Nothing to do */
		return 0;
	}

	if (config->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	/* Verify if requested op mode is relevant to this controller */
	if (config->operation & SPI_OP_MODE_SLAVE) {
		if (!(info->serial_target)) {
			LOG_ERR("Slave mode not supported");
			return -ENOTSUP;
		}
	} else {
		if (info->serial_target) {
			LOG_ERR("Master mode not supported");
			return -ENOTSUP;
		}
	}

	/* LSB-first and multi-line (dual/quad/octal) modes are not
	 * implemented by this driver.
	 */
	if ((config->operation & SPI_TRANSFER_LSB) ||
	    (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) &&
	     (config->operation & (SPI_LINES_DUAL |
				   SPI_LINES_QUAD | SPI_LINES_OCTAL)))) {
		LOG_ERR("Unsupported configuration");
		return -EINVAL;
	}

	if (info->max_xfer_size < SPI_WORD_SIZE_GET(config->operation)) {
		LOG_ERR("Max xfer size is %u, word size of %u not allowed",
			info->max_xfer_size, SPI_WORD_SIZE_GET(config->operation));
		return -ENOTSUP;
	}

	/* Word size: the DFS field layout differs between 16-bit and
	 * 32-bit capable controllers.
	 */
	if (info->max_xfer_size == 32) {
		ctrlr0 |= DW_SPI_CTRLR0_DFS_32(SPI_WORD_SIZE_GET(config->operation));
	} else {
		ctrlr0 |= DW_SPI_CTRLR0_DFS_16(SPI_WORD_SIZE_GET(config->operation));
	}

	/* Determine how many bytes are required per-frame */
	spi->dfs = SPI_WS_TO_DFS(SPI_WORD_SIZE_GET(config->operation));

	/* SPI mode */
	if (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) {
		ctrlr0 |= DW_SPI_CTRLR0_SCPOL;
	}

	if (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) {
		ctrlr0 |= DW_SPI_CTRLR0_SCPH;
	}

	if (SPI_MODE_GET(config->operation) & SPI_MODE_LOOP) {
		ctrlr0 |= DW_SPI_CTRLR0_SRL;
	}

	/* Installing the configuration */
	write_ctrlr0(dev, ctrlr0);

	/* At this point, it's mandatory to set this on the context! */
	spi->ctx.config = config;

	if (!spi_dw_is_slave(spi)) {
		/* Baud rate and Slave select, for master only */
		write_baudr(dev, SPI_DW_CLK_DIVIDER(info->clock_frequency,
						    config->frequency));
	}

	if (spi_dw_is_slave(spi)) {
		LOG_DBG("Installed slave config %p:"
			" ws/dfs %u/%u, mode %u/%u/%u",
			config,
			SPI_WORD_SIZE_GET(config->operation), spi->dfs,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_CPOL) ? 1 : 0,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_CPHA) ? 1 : 0,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_LOOP) ? 1 : 0);
	} else {
		LOG_DBG("Installed master config %p: freq %uHz (div = %u),"
			" ws/dfs %u/%u, mode %u/%u/%u, slave %u",
			config, config->frequency,
			SPI_DW_CLK_DIVIDER(info->clock_frequency,
					   config->frequency),
			SPI_WORD_SIZE_GET(config->operation), spi->dfs,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_CPOL) ? 1 : 0,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_CPHA) ? 1 : 0,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_LOOP) ? 1 : 0,
			config->slave);
	}

	return 0;
}
291
spi_dw_compute_ndf(const struct spi_buf * rx_bufs,size_t rx_count,uint8_t dfs)292 static uint32_t spi_dw_compute_ndf(const struct spi_buf *rx_bufs,
293 size_t rx_count, uint8_t dfs)
294 {
295 uint32_t len = 0U;
296
297 for (; rx_count; rx_bufs++, rx_count--) {
298 if (len > (UINT16_MAX - rx_bufs->len)) {
299 goto error;
300 }
301
302 len += rx_bufs->len;
303 }
304
305 if (len) {
306 return (len / dfs) - 1;
307 }
308 error:
309 return UINT32_MAX;
310 }
311
spi_dw_update_txftlr(const struct device * dev,struct spi_dw_data * spi)312 static void spi_dw_update_txftlr(const struct device *dev,
313 struct spi_dw_data *spi)
314 {
315 const struct spi_dw_config *info = dev->config;
316 uint32_t dw_spi_txftlr_dflt = (info->fifo_depth * 1) / 2;
317 uint32_t reg_data = dw_spi_txftlr_dflt;
318
319 if (spi_dw_is_slave(spi)) {
320 if (!spi->ctx.tx_len) {
321 reg_data = 0U;
322 } else if (spi->ctx.tx_len < dw_spi_txftlr_dflt) {
323 reg_data = spi->ctx.tx_len - 1;
324 }
325 }
326
327 LOG_DBG("TxFTLR: %u", reg_data);
328
329 write_txftlr(dev, reg_data);
330 }
331
/* Common transfer path for the sync and async entry points: locks the
 * context, configures the controller, programs transfer mode and FIFO
 * thresholds, enables interrupts, asserts CS, then waits for (or arms a
 * callback on) completion.
 *
 * Returns 0 on success or a negative errno; in slave mode a successful
 * call returns the number of frames received.
 */
static int transceive(const struct device *dev,
		      const struct spi_config *config,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	const struct spi_dw_config *info = dev->config;
	struct spi_dw_data *spi = dev->data;
	uint32_t tmod = DW_SPI_CTRLR0_TMOD_TX_RX;
	uint32_t dw_spi_rxftlr_dflt = (info->fifo_depth * 5) / 8;
	uint32_t reg_data;
	int ret;

	spi_context_lock(&spi->ctx, asynchronous, cb, userdata, config);

#ifdef CONFIG_PM_DEVICE
	/* Keep the device out of low-power states for the duration. */
	if (!pm_device_is_busy(dev)) {
		pm_device_busy_set(dev);
	}
#endif /* CONFIG_PM_DEVICE */

	/* Configure */
	ret = spi_dw_configure(dev, spi, config);
	if (ret) {
		goto out;
	}

	/* Derive the transfer mode from which buffer sets are present. */
	if (!rx_bufs || !rx_bufs->buffers) {
		tmod = DW_SPI_CTRLR0_TMOD_TX;
	} else if (!tx_bufs || !tx_bufs->buffers) {
		tmod = DW_SPI_CTRLR0_TMOD_RX;
	}

	/* ToDo: add a way to determine EEPROM mode */

	/* RX-only master transfers need CTRLR1 programmed with the frame
	 * count (NDF); every other case leaves it at 0.
	 */
	if (tmod >= DW_SPI_CTRLR0_TMOD_RX &&
	    !spi_dw_is_slave(spi)) {
		reg_data = spi_dw_compute_ndf(rx_bufs->buffers,
					      rx_bufs->count,
					      spi->dfs);
		if (reg_data == UINT32_MAX) {
			ret = -EINVAL;
			goto out;
		}

		write_ctrlr1(dev, reg_data);
	} else {
		write_ctrlr1(dev, 0);
	}

	if (spi_dw_is_slave(spi)) {
		/* Enabling MISO line relevantly */
		if (tmod == DW_SPI_CTRLR0_TMOD_RX) {
			tmod |= DW_SPI_CTRLR0_SLV_OE;
		} else {
			tmod &= ~DW_SPI_CTRLR0_SLV_OE;
		}
	}

	/* Updating TMOD in CTRLR0 register */
	reg_data = read_ctrlr0(dev);
	reg_data &= ~DW_SPI_CTRLR0_TMOD_RESET;
	reg_data |= tmod;

	write_ctrlr0(dev, reg_data);

	/* Set buffers info */
	spi_context_buffers_setup(&spi->ctx, tx_bufs, rx_bufs, spi->dfs);

	/* No frames in flight yet. */
	spi->fifo_diff = 0U;

	/* Tx Threshold */
	spi_dw_update_txftlr(dev, spi);

	/* Does Rx thresholds needs to be lower? */
	reg_data = dw_spi_rxftlr_dflt;

	if (spi_dw_is_slave(spi)) {
		if (spi->ctx.rx_len &&
		    spi->ctx.rx_len < dw_spi_rxftlr_dflt) {
			reg_data = spi->ctx.rx_len - 1;
		}
	} else {
		if (spi->ctx.rx_len && spi->ctx.rx_len < info->fifo_depth) {
			reg_data = spi->ctx.rx_len - 1;
		}
	}

	/* Rx Threshold */
	write_rxftlr(dev, reg_data);

	/* Enable interrupts; mask RX ones when there is nothing to read. */
	reg_data = !rx_bufs ?
		DW_SPI_IMR_UNMASK & DW_SPI_IMR_MASK_RX :
		DW_SPI_IMR_UNMASK;
	write_imr(dev, reg_data);

	if (!spi_dw_is_slave(spi)) {
		/* if cs is not defined as gpio, use hw cs */
		if (spi_cs_is_gpio(config)) {
			spi_context_cs_control(&spi->ctx, true);
		} else {
			write_ser(dev, BIT(config->slave));
		}
	}

	LOG_DBG("Enabling controller");
	set_bit_ssienr(dev);

	ret = spi_context_wait_for_completion(&spi->ctx);

#ifdef CONFIG_SPI_SLAVE
	/* Slave mode reports the number of frames actually received. */
	if (spi_context_is_slave(&spi->ctx) && !ret) {
		ret = spi->ctx.recv_frames;
	}
#endif /* CONFIG_SPI_SLAVE */

out:
	spi_context_release(&spi->ctx, ret);

	pm_device_busy_clear(dev);

	return ret;
}
458
spi_dw_transceive(const struct device * dev,const struct spi_config * config,const struct spi_buf_set * tx_bufs,const struct spi_buf_set * rx_bufs)459 static int spi_dw_transceive(const struct device *dev,
460 const struct spi_config *config,
461 const struct spi_buf_set *tx_bufs,
462 const struct spi_buf_set *rx_bufs)
463 {
464 LOG_DBG("%p, %p, %p", dev, tx_bufs, rx_bufs);
465
466 return transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
467 }
468
469 #ifdef CONFIG_SPI_ASYNC
/* Asynchronous transceive entry point: delegates to the common path
 * with the caller's completion callback and user data.
 */
static int spi_dw_transceive_async(const struct device *dev,
				   const struct spi_config *config,
				   const struct spi_buf_set *tx_bufs,
				   const struct spi_buf_set *rx_bufs,
				   spi_callback_t cb,
				   void *userdata)
{
	LOG_DBG("%p, %p, %p, %p, %p", dev, tx_bufs, rx_bufs, cb, userdata);

	/* asynchronous = true: returns immediately, cb fires on completion. */
	return transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
}
481 #endif /* CONFIG_SPI_ASYNC */
482
spi_dw_release(const struct device * dev,const struct spi_config * config)483 static int spi_dw_release(const struct device *dev,
484 const struct spi_config *config)
485 {
486 struct spi_dw_data *spi = dev->data;
487
488 if (!spi_context_configured(&spi->ctx, config)) {
489 return -EINVAL;
490 }
491
492 spi_context_unlock_unconditionally(&spi->ctx);
493
494 return 0;
495 }
496
/* Shared interrupt handler for all mapped IRQ lines (data and error).
 * Drains RX first, refills TX, then lets completed() decide whether the
 * transfer is finished.
 */
void spi_dw_isr(const struct device *dev)
{
	uint32_t int_status;
	int error;

	int_status = read_isr(dev);

	LOG_DBG("SPI %p int_status 0x%x - (tx: %d, rx: %d)", dev, int_status,
		read_txflr(dev), read_rxflr(dev));

	/* Any error bit in DW_SPI_ISR_ERRORS_MASK aborts with -EIO. */
	if (int_status & DW_SPI_ISR_ERRORS_MASK) {
		error = -EIO;
		goto out;
	}

	error = 0;

	/* Pull before push so freed RX space is visible to push_data(). */
	if (int_status & DW_SPI_ISR_RXFIS) {
		pull_data(dev);
	}

	if (int_status & DW_SPI_ISR_TXEIS) {
		push_data(dev);
	}

out:
	clear_interrupts(dev);
	completed(dev, error);
}
526
/* SPI driver API vtable registered with the Zephyr SPI subsystem. */
static const struct spi_driver_api dw_spi_api = {
	.transceive = spi_dw_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_dw_transceive_async,
#endif /* CONFIG_SPI_ASYNC */
	.release = spi_dw_release,
};
534
spi_dw_init(const struct device * dev)535 int spi_dw_init(const struct device *dev)
536 {
537 int err;
538 const struct spi_dw_config *info = dev->config;
539 struct spi_dw_data *spi = dev->data;
540
541 #ifdef CONFIG_PINCTRL
542 pinctrl_apply_state(info->pcfg, PINCTRL_STATE_DEFAULT);
543 #endif
544
545 DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);
546
547 info->config_func();
548
549 /* Masking interrupt and making sure controller is disabled */
550 write_imr(dev, DW_SPI_IMR_MASK);
551 clear_bit_ssienr(dev);
552
553 LOG_DBG("Designware SPI driver initialized on device: %p", dev);
554
555 err = spi_context_cs_configure_all(&spi->ctx);
556 if (err < 0) {
557 return err;
558 }
559
560 spi_context_unlock_unconditionally(&spi->ctx);
561
562 return 0;
563 }
564
/* Wire up instances with three named interrupt lines: rx_avail, tx_req
 * and one combined err_int line.  All route to spi_dw_isr().
 */
#define SPI_CFG_IRQS_SINGLE_ERR_LINE(inst) \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, rx_avail, irq), \
		    DT_INST_IRQ_BY_NAME(inst, rx_avail, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, tx_req, irq), \
		    DT_INST_IRQ_BY_NAME(inst, tx_req, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, err_int, irq), \
		    DT_INST_IRQ_BY_NAME(inst, err_int, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, rx_avail, irq)); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, tx_req, irq)); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, err_int, irq));
581
/* Wire up instances with a dedicated interrupt line per error source
 * (txo_err, rxo_err, rxu_err, mst_err) in addition to rx_avail/tx_req.
 * All route to spi_dw_isr().
 */
#define SPI_CFG_IRQS_MULTIPLE_ERR_LINES(inst) \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, rx_avail, irq), \
		    DT_INST_IRQ_BY_NAME(inst, rx_avail, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, tx_req, irq), \
		    DT_INST_IRQ_BY_NAME(inst, tx_req, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, txo_err, irq), \
		    DT_INST_IRQ_BY_NAME(inst, txo_err, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, rxo_err, irq), \
		    DT_INST_IRQ_BY_NAME(inst, rxo_err, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, rxu_err, irq), \
		    DT_INST_IRQ_BY_NAME(inst, rxu_err, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, mst_err, irq), \
		    DT_INST_IRQ_BY_NAME(inst, mst_err, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, rx_avail, irq)); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, tx_req, irq)); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, txo_err, irq)); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, rxo_err, irq)); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, rxu_err, irq)); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, mst_err, irq));
613
/* Generate the per-instance IRQ configuration function.  Dispatches on
 * the number of interrupts the devicetree node declares: 1 = single
 * combined line, 3 = data lines plus one error line, otherwise one line
 * per error source.
 */
#define SPI_DW_IRQ_HANDLER(inst) \
void spi_dw_irq_config_##inst(void) \
{ \
	COND_CODE_1(IS_EQ(DT_NUM_IRQS(DT_DRV_INST(inst)), 1), \
		(IRQ_CONNECT(DT_INST_IRQN(inst), \
			DT_INST_IRQ(inst, priority), \
			spi_dw_isr, DEVICE_DT_INST_GET(inst), \
			0); \
		irq_enable(DT_INST_IRQN(inst));), \
		(COND_CODE_1(IS_EQ(DT_NUM_IRQS(DT_DRV_INST(inst)), 3), \
			(SPI_CFG_IRQS_SINGLE_ERR_LINE(inst)), \
			(SPI_CFG_IRQS_MULTIPLE_ERR_LINES(inst))))) \
}
627
/* Per-instance instantiation: pinctrl definition (when enabled), IRQ
 * config function, runtime data, config (register access functions are
 * selected by the aux_reg DT property) and the device definition.
 */
#define SPI_DW_INIT(inst)                                                                   \
	IF_ENABLED(CONFIG_PINCTRL, (PINCTRL_DT_INST_DEFINE(inst);))                         \
	SPI_DW_IRQ_HANDLER(inst);                                                           \
	static struct spi_dw_data spi_dw_data_##inst = {                                    \
		SPI_CONTEXT_INIT_LOCK(spi_dw_data_##inst, ctx),                             \
		SPI_CONTEXT_INIT_SYNC(spi_dw_data_##inst, ctx),                             \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(inst), ctx)                     \
	};                                                                                  \
	static const struct spi_dw_config spi_dw_config_##inst = {                          \
		DEVICE_MMIO_ROM_INIT(DT_DRV_INST(inst)),                                    \
		.clock_frequency = COND_CODE_1(                                             \
			DT_NODE_HAS_PROP(DT_INST_PHANDLE(inst, clocks), clock_frequency),   \
			(DT_INST_PROP_BY_PHANDLE(inst, clocks, clock_frequency)),           \
			(DT_INST_PROP(inst, clock_frequency))),                             \
		.config_func = spi_dw_irq_config_##inst,                                    \
		.serial_target = DT_INST_PROP(inst, serial_target),                         \
		.fifo_depth = DT_INST_PROP(inst, fifo_depth),                               \
		.max_xfer_size = DT_INST_PROP(inst, max_xfer_size),                         \
		IF_ENABLED(CONFIG_PINCTRL, (.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst),)) \
		COND_CODE_1(DT_INST_PROP(inst, aux_reg),                                    \
			(.read_func = aux_reg_read,                                         \
			.write_func = aux_reg_write,                                        \
			.set_bit_func = aux_reg_set_bit,                                    \
			.clear_bit_func = aux_reg_clear_bit,                                \
			.test_bit_func = aux_reg_test_bit,),                                \
			(.read_func = reg_read,                                             \
			.write_func = reg_write,                                            \
			.set_bit_func = reg_set_bit,                                        \
			.clear_bit_func = reg_clear_bit,                                    \
			.test_bit_func = reg_test_bit,))                                    \
	};                                                                                  \
	DEVICE_DT_INST_DEFINE(inst,                                                         \
		spi_dw_init,                                                                \
		NULL,                                                                       \
		&spi_dw_data_##inst,                                                        \
		&spi_dw_config_##inst,                                                      \
		POST_KERNEL,                                                                \
		CONFIG_SPI_INIT_PRIORITY,                                                   \
		&dw_spi_api);
667
/* Instantiate the driver for every enabled devicetree node matching
 * the snps,designware-spi compatible.
 */
DT_INST_FOREACH_STATUS_OKAY(SPI_DW_INIT)
669