/*
 * Copyright (c) 2015 Intel Corporation.
 * Copyright (c) 2023 Synopsys, Inc. All rights reserved.
 * Copyright (c) 2023 Meta Platforms
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT snps_designware_spi

/* spi_dw.c - Designware SPI driver implementation */

#define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_dw);

#include <errno.h>

#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>

#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/pm/device.h>

#include <zephyr/sys/sys_io.h>
#include <zephyr/sys/util.h>

#ifdef CONFIG_IOAPIC
#include <zephyr/drivers/interrupt_controller/ioapic.h>
#endif

#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/irq.h>

#include "spi_dw.h"
#include "spi_context.h"

#ifdef CONFIG_PINCTRL
#include <zephyr/drivers/pinctrl.h>
#endif

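/* True when slave support is compiled in and the context is configured
 * as a slave.
 */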
static inline bool spi_dw_is_slave(struct spi_dw_data *spi)
{
	return (IS_ENABLED(CONFIG_SPI_SLAVE) &&
		spi_context_is_slave(&spi->ctx));
}

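/* Finalize the current transaction: wait for the FIFOs to drain, mask
 * interrupts, disable the controller, release the chip select and
 * signal completion (with any error) to the context.
 */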
static void completed(const struct device *dev, int error)
{
	struct spi_dw_data *spi = dev->data;
	struct spi_context *ctx = &spi->ctx;

	if (error) {
		goto out;
	}

	if (spi_context_tx_on(&spi->ctx) ||
	    spi_context_rx_on(&spi->ctx)) {
		return;
	}

out:
	/* Give the FIFOs time to drain before issuing more commands */
	while (test_bit_sr_busy(dev)) {
	}

	/* Disable interrupts */
	write_imr(dev, DW_SPI_IMR_MASK);
	/* Disable the controller */
	clear_bit_ssienr(dev);

	if (!spi_dw_is_slave(spi)) {
		if (spi_cs_is_gpio(ctx->config)) {
			spi_context_cs_control(ctx, false);
		} else {
			write_ser(dev, 0);
		}
	}

	LOG_DBG("SPI transaction completed %s error",
		error ? "with" : "without");

	spi_context_complete(&spi->ctx, dev, error);
}

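/* Fill the TX FIFO from the context TX buffer(s). The number of frames
 * pushed is bounded by the free TX FIFO space and, when receiving, also
 * by the room left in the RX FIFO so incoming data is never overrun.
 * fifo_diff tracks frames pushed but not yet pulled back from RX.
 */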
static void push_data(const struct device *dev)
{
	const struct spi_dw_config *info = dev->config;
	struct spi_dw_data *spi = dev->data;
	uint32_t data = 0U;
	uint32_t f_tx;

	if (spi_context_rx_on(&spi->ctx)) {
		f_tx = info->fifo_depth - read_txflr(dev) -
			read_rxflr(dev);
		if ((int)f_tx < 0) {
			f_tx = 0U; /* if rx-fifo is full, hold off tx */
		}
	} else {
		f_tx = info->fifo_depth - read_txflr(dev);
	}

	while (f_tx) {
		if (spi_context_tx_buf_on(&spi->ctx)) {
			switch (spi->dfs) {
			case 1:
				data = UNALIGNED_GET((uint8_t *)
						     (spi->ctx.tx_buf));
				break;
			case 2:
				data = UNALIGNED_GET((uint16_t *)
						     (spi->ctx.tx_buf));
				break;
			case 4:
				data = UNALIGNED_GET((uint32_t *)
						     (spi->ctx.tx_buf));
				break;
			}
		} else if (spi_context_rx_on(&spi->ctx)) {
			/* No need to push more than necessary */
			if ((int)(spi->ctx.rx_len - spi->fifo_diff) <= 0) {
				break;
			}

			data = 0U;
		} else if (spi_context_tx_on(&spi->ctx)) {
			data = 0U;
		} else {
			/* Nothing to push anymore */
			break;
		}

		write_dr(dev, data);

		spi_context_update_tx(&spi->ctx, spi->dfs, 1);
		spi->fifo_diff++;

		f_tx--;
	}

	if (!spi_context_tx_on(&spi->ctx)) {
		/* prevents any further interrupts demanding TX fifo fill */
		write_txftlr(dev, 0);
	}
}

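/* Drain the RX FIFO into the context RX buffer(s), then lower the RX
 * threshold when fewer frames than the current threshold remain, so the
 * tail of a transfer still raises an interrupt.
 */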
static void pull_data(const struct device *dev)
{
	const struct spi_dw_config *info = dev->config;
	struct spi_dw_data *spi = dev->data;

	while (read_rxflr(dev)) {
		uint32_t data = read_dr(dev);

		if (spi_context_rx_buf_on(&spi->ctx)) {
			switch (spi->dfs) {
			case 1:
				UNALIGNED_PUT(data, (uint8_t *)spi->ctx.rx_buf);
				break;
			case 2:
				UNALIGNED_PUT(data, (uint16_t *)spi->ctx.rx_buf);
				break;
			case 4:
				UNALIGNED_PUT(data, (uint32_t *)spi->ctx.rx_buf);
				break;
			}
		}

		spi_context_update_rx(&spi->ctx, spi->dfs, 1);
		spi->fifo_diff--;
	}

	if (!spi->ctx.rx_len && spi->ctx.tx_len < info->fifo_depth) {
		write_rxftlr(dev, spi->ctx.tx_len - 1);
	} else if (read_rxftlr(dev) >= spi->ctx.rx_len) {
		write_rxftlr(dev, spi->ctx.rx_len - 1);
	}
}

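/* Validate the requested spi_config against the controller's
 * capabilities and program CTRLR0 (word size, clock polarity/phase,
 * loopback) plus, in master mode, the baud-rate divider.
 */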
static int spi_dw_configure(const struct device *dev,
			    struct spi_dw_data *spi,
			    const struct spi_config *config)
{
	const struct spi_dw_config *info = dev->config;
	uint32_t ctrlr0 = 0U;

	LOG_DBG("%p (prev %p)", config, spi->ctx.config);

	if (spi_context_configured(&spi->ctx, config)) {
		/* Nothing to do */
		return 0;
	}

	if (config->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	/* Verify that the requested operation mode is supported */
	if (config->operation & SPI_OP_MODE_SLAVE) {
		if (!(info->serial_target)) {
			LOG_ERR("Slave mode not supported");
			return -ENOTSUP;
		}
	} else {
		if (info->serial_target) {
			LOG_ERR("Master mode not supported");
			return -ENOTSUP;
		}
	}

	if ((config->operation & SPI_TRANSFER_LSB) ||
	    (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) &&
	     (config->operation & (SPI_LINES_DUAL |
				   SPI_LINES_QUAD | SPI_LINES_OCTAL)))) {
		LOG_ERR("Unsupported configuration");
		return -EINVAL;
	}

	if (info->max_xfer_size < SPI_WORD_SIZE_GET(config->operation)) {
		LOG_ERR("Max xfer size is %u, word size of %u not allowed",
			info->max_xfer_size, SPI_WORD_SIZE_GET(config->operation));
		return -ENOTSUP;
	}

	/* Word size */
	if (info->max_xfer_size == 32) {
		ctrlr0 |= DW_SPI_CTRLR0_DFS_32(SPI_WORD_SIZE_GET(config->operation));
	} else {
		ctrlr0 |= DW_SPI_CTRLR0_DFS_16(SPI_WORD_SIZE_GET(config->operation));
	}

	/* Determine how many bytes are required per frame */
	spi->dfs = SPI_WS_TO_DFS(SPI_WORD_SIZE_GET(config->operation));

	/* SPI mode */
	if (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) {
		ctrlr0 |= DW_SPI_CTRLR0_SCPOL;
	}

	if (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) {
		ctrlr0 |= DW_SPI_CTRLR0_SCPH;
	}

	if (SPI_MODE_GET(config->operation) & SPI_MODE_LOOP) {
		ctrlr0 |= DW_SPI_CTRLR0_SRL;
	}

	/* Install the configuration */
	write_ctrlr0(dev, ctrlr0);

	/* At this point, it's mandatory to set this on the context! */
	spi->ctx.config = config;

	if (!spi_dw_is_slave(spi)) {
		/* Baud rate divider, master mode only (SER is programmed
		 * per-transfer in transceive())
		 */
		write_baudr(dev, SPI_DW_CLK_DIVIDER(info->clock_frequency,
						    config->frequency));
	}

	if (spi_dw_is_slave(spi)) {
		LOG_DBG("Installed slave config %p:"
			" ws/dfs %u/%u, mode %u/%u/%u",
			config,
			SPI_WORD_SIZE_GET(config->operation), spi->dfs,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_CPOL) ? 1 : 0,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_CPHA) ? 1 : 0,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_LOOP) ? 1 : 0);
	} else {
		LOG_DBG("Installed master config %p: freq %uHz (div = %u),"
			" ws/dfs %u/%u, mode %u/%u/%u, slave %u",
			config, config->frequency,
			SPI_DW_CLK_DIVIDER(info->clock_frequency,
					   config->frequency),
			SPI_WORD_SIZE_GET(config->operation), spi->dfs,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_CPOL) ? 1 : 0,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_CPHA) ? 1 : 0,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_LOOP) ? 1 : 0,
			config->slave);
	}

	return 0;
}

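/* Compute the CTRLR1 NDF value (total number of RX frames minus one)
 * for receive-only transfers. Returns UINT32_MAX when the total length
 * is zero or would overflow the 16-bit NDF field.
 */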
static uint32_t spi_dw_compute_ndf(const struct spi_buf *rx_bufs,
				   size_t rx_count, uint8_t dfs)
{
	uint32_t len = 0U;

	for (; rx_count; rx_bufs++, rx_count--) {
		if (len > (UINT16_MAX - rx_bufs->len)) {
			goto error;
		}

		len += rx_bufs->len;
	}

	if (len) {
		return (len / dfs) - 1;
	}
error:
	return UINT32_MAX;
}

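/* Program the TX FIFO threshold: half the FIFO depth by default,
 * lowered (or zeroed) in slave mode when the TX length is shorter than
 * that default.
 */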
static void spi_dw_update_txftlr(const struct device *dev,
				 struct spi_dw_data *spi)
{
	const struct spi_dw_config *info = dev->config;
	uint32_t dw_spi_txftlr_dflt = info->fifo_depth / 2;
	uint32_t reg_data = dw_spi_txftlr_dflt;

	if (spi_dw_is_slave(spi)) {
		if (!spi->ctx.tx_len) {
			reg_data = 0U;
		} else if (spi->ctx.tx_len < dw_spi_txftlr_dflt) {
			reg_data = spi->ctx.tx_len - 1;
		}
	}

	LOG_DBG("TxFTLR: %u", reg_data);

	write_txftlr(dev, reg_data);
}

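/* Core transfer routine shared by the sync and async entry points:
 * configure the controller, select the transfer mode (TX/RX/both),
 * program the FIFO thresholds, unmask interrupts and enable the
 * controller, then let the ISR drive the transfer to completion.
 */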
static int transceive(const struct device *dev,
		      const struct spi_config *config,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	const struct spi_dw_config *info = dev->config;
	struct spi_dw_data *spi = dev->data;
	uint32_t tmod = DW_SPI_CTRLR0_TMOD_TX_RX;
	uint32_t dw_spi_rxftlr_dflt = (info->fifo_depth * 5) / 8;
	uint32_t reg_data;
	int ret;

	spi_context_lock(&spi->ctx, asynchronous, cb, userdata, config);

#ifdef CONFIG_PM_DEVICE
	if (!pm_device_is_busy(dev)) {
		pm_device_busy_set(dev);
	}
#endif /* CONFIG_PM_DEVICE */

	/* Configure */
	ret = spi_dw_configure(dev, spi, config);
	if (ret) {
		goto out;
	}

	if (!rx_bufs || !rx_bufs->buffers) {
		tmod = DW_SPI_CTRLR0_TMOD_TX;
	} else if (!tx_bufs || !tx_bufs->buffers) {
		tmod = DW_SPI_CTRLR0_TMOD_RX;
	}

	/* ToDo: add a way to determine EEPROM mode */

	if (tmod >= DW_SPI_CTRLR0_TMOD_RX &&
	    !spi_dw_is_slave(spi)) {
		reg_data = spi_dw_compute_ndf(rx_bufs->buffers,
					      rx_bufs->count,
					      spi->dfs);
		if (reg_data == UINT32_MAX) {
			ret = -EINVAL;
			goto out;
		}

		write_ctrlr1(dev, reg_data);
	} else {
		write_ctrlr1(dev, 0);
	}

	if (spi_dw_is_slave(spi)) {
		/* Configure the slave output enable (SLV_OE) according
		 * to the transfer mode
		 */
		if (tmod == DW_SPI_CTRLR0_TMOD_RX) {
			tmod |= DW_SPI_CTRLR0_SLV_OE;
		} else {
			tmod &= ~DW_SPI_CTRLR0_SLV_OE;
		}
	}

	/* Update TMOD in the CTRLR0 register */
	reg_data = read_ctrlr0(dev);
	reg_data &= ~DW_SPI_CTRLR0_TMOD_RESET;
	reg_data |= tmod;

	write_ctrlr0(dev, reg_data);

	/* Set buffers info */
	spi_context_buffers_setup(&spi->ctx, tx_bufs, rx_bufs, spi->dfs);

	spi->fifo_diff = 0U;

	/* Tx Threshold */
	spi_dw_update_txftlr(dev, spi);

	/* Does the Rx threshold need to be lower? */
	reg_data = dw_spi_rxftlr_dflt;

	if (spi_dw_is_slave(spi)) {
		if (spi->ctx.rx_len &&
		    spi->ctx.rx_len < dw_spi_rxftlr_dflt) {
			reg_data = spi->ctx.rx_len - 1;
		}
	} else {
		if (spi->ctx.rx_len && spi->ctx.rx_len < info->fifo_depth) {
			reg_data = spi->ctx.rx_len - 1;
		}
	}

	/* Rx Threshold */
	write_rxftlr(dev, reg_data);

	/* Enable interrupts */
	reg_data = !rx_bufs ?
		DW_SPI_IMR_UNMASK & DW_SPI_IMR_MASK_RX :
		DW_SPI_IMR_UNMASK;
	write_imr(dev, reg_data);

	if (!spi_dw_is_slave(spi)) {
		/* If CS is not a GPIO, use the hardware chip select (SER) */
		if (spi_cs_is_gpio(config)) {
			spi_context_cs_control(&spi->ctx, true);
		} else {
			write_ser(dev, BIT(config->slave));
		}
	}

	LOG_DBG("Enabling controller");
	set_bit_ssienr(dev);

	ret = spi_context_wait_for_completion(&spi->ctx);

#ifdef CONFIG_SPI_SLAVE
	if (spi_context_is_slave(&spi->ctx) && !ret) {
		ret = spi->ctx.recv_frames;
	}
#endif /* CONFIG_SPI_SLAVE */

out:
	spi_context_release(&spi->ctx, ret);

	pm_device_busy_clear(dev);

	return ret;
}

static int spi_dw_transceive(const struct device *dev,
			     const struct spi_config *config,
			     const struct spi_buf_set *tx_bufs,
			     const struct spi_buf_set *rx_bufs)
{
	LOG_DBG("%p, %p, %p", dev, tx_bufs, rx_bufs);

	return transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_dw_transceive_async(const struct device *dev,
				   const struct spi_config *config,
				   const struct spi_buf_set *tx_bufs,
				   const struct spi_buf_set *rx_bufs,
				   spi_callback_t cb,
				   void *userdata)
{
	LOG_DBG("%p, %p, %p, %p, %p", dev, tx_bufs, rx_bufs, cb, userdata);

	return transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_dw_release(const struct device *dev,
			  const struct spi_config *config)
{
	struct spi_dw_data *spi = dev->data;

	if (!spi_context_configured(&spi->ctx, config)) {
		return -EINVAL;
	}

	spi_context_unlock_unconditionally(&spi->ctx);

	return 0;
}

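/* Interrupt service routine: an error interrupt aborts the transfer
 * with -EIO; otherwise RX frames are drained and the TX FIFO refilled,
 * and completed() decides whether the transfer is finished.
 */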
void spi_dw_isr(const struct device *dev)
{
	uint32_t int_status;
	int error;

	int_status = read_isr(dev);

	LOG_DBG("SPI %p int_status 0x%x - (tx: %d, rx: %d)", dev, int_status,
		read_txflr(dev), read_rxflr(dev));

	if (int_status & DW_SPI_ISR_ERRORS_MASK) {
		error = -EIO;
		goto out;
	}

	error = 0;

	if (int_status & DW_SPI_ISR_RXFIS) {
		pull_data(dev);
	}

	if (int_status & DW_SPI_ISR_TXEIS) {
		push_data(dev);
	}

out:
	clear_interrupts(dev);
	completed(dev, error);
}

static DEVICE_API(spi, dw_spi_api) = {
	.transceive = spi_dw_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_dw_transceive_async,
#endif /* CONFIG_SPI_ASYNC */
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = spi_dw_release,
};

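/* Device init: apply the default pinctrl state, map MMIO, hook up IRQs,
 * mask all controller interrupts, and configure any chip-select GPIOs.
 */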
int spi_dw_init(const struct device *dev)
{
	int err;
	const struct spi_dw_config *info = dev->config;
	struct spi_dw_data *spi = dev->data;

#ifdef CONFIG_PINCTRL
	err = pinctrl_apply_state(info->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		return err;
	}
#endif

	DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);

	info->config_func();

	/* Mask interrupts and make sure the controller is disabled */
	write_imr(dev, DW_SPI_IMR_MASK);
	clear_bit_ssienr(dev);

	LOG_DBG("Designware SPI driver initialized on device: %p", dev);

	err = spi_context_cs_configure_all(&spi->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&spi->ctx);

	return 0;
}

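/* IRQ wiring helpers: a controller may expose a single combined IRQ
 * line, three lines (rx_avail, tx_req plus one combined error line), or
 * a separate line per error source.
 */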
#define SPI_CFG_IRQS_SINGLE_ERR_LINE(inst) \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, rx_avail, irq), \
		    DT_INST_IRQ_BY_NAME(inst, rx_avail, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, tx_req, irq), \
		    DT_INST_IRQ_BY_NAME(inst, tx_req, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, err_int, irq), \
		    DT_INST_IRQ_BY_NAME(inst, err_int, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, rx_avail, irq)); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, tx_req, irq)); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, err_int, irq));

#define SPI_CFG_IRQS_MULTIPLE_ERR_LINES(inst) \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, rx_avail, irq), \
		    DT_INST_IRQ_BY_NAME(inst, rx_avail, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, tx_req, irq), \
		    DT_INST_IRQ_BY_NAME(inst, tx_req, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, txo_err, irq), \
		    DT_INST_IRQ_BY_NAME(inst, txo_err, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, rxo_err, irq), \
		    DT_INST_IRQ_BY_NAME(inst, rxo_err, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, rxu_err, irq), \
		    DT_INST_IRQ_BY_NAME(inst, rxu_err, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, mst_err, irq), \
		    DT_INST_IRQ_BY_NAME(inst, mst_err, priority), \
		    spi_dw_isr, DEVICE_DT_INST_GET(inst), \
		    0); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, rx_avail, irq)); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, tx_req, irq)); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, txo_err, irq)); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, rxo_err, irq)); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, rxu_err, irq)); \
	irq_enable(DT_INST_IRQ_BY_NAME(inst, mst_err, irq));

#define SPI_DW_IRQ_HANDLER(inst) \
void spi_dw_irq_config_##inst(void) \
{ \
	COND_CODE_1(IS_EQ(DT_NUM_IRQS(DT_DRV_INST(inst)), 1), \
		(IRQ_CONNECT(DT_INST_IRQN(inst), \
			DT_INST_IRQ(inst, priority), \
			spi_dw_isr, DEVICE_DT_INST_GET(inst), \
			0); \
		irq_enable(DT_INST_IRQN(inst));), \
		(COND_CODE_1(IS_EQ(DT_NUM_IRQS(DT_DRV_INST(inst)), 3), \
			(SPI_CFG_IRQS_SINGLE_ERR_LINE(inst)), \
			(SPI_CFG_IRQS_MULTIPLE_ERR_LINES(inst))))) \
}

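/* Per-instance definition: device data and config structures, plus the
 * register accessors (regular memory-mapped or AUX-bus, selected by the
 * aux-reg devicetree property).
 */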
#define SPI_DW_INIT(inst) \
	IF_ENABLED(CONFIG_PINCTRL, (PINCTRL_DT_INST_DEFINE(inst);)) \
	SPI_DW_IRQ_HANDLER(inst); \
	static struct spi_dw_data spi_dw_data_##inst = { \
		SPI_CONTEXT_INIT_LOCK(spi_dw_data_##inst, ctx), \
		SPI_CONTEXT_INIT_SYNC(spi_dw_data_##inst, ctx), \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(inst), ctx) \
	}; \
	static const struct spi_dw_config spi_dw_config_##inst = { \
		DEVICE_MMIO_ROM_INIT(DT_DRV_INST(inst)), \
		.clock_frequency = COND_CODE_1( \
			DT_NODE_HAS_PROP(DT_INST_PHANDLE(inst, clocks), clock_frequency), \
			(DT_INST_PROP_BY_PHANDLE(inst, clocks, clock_frequency)), \
			(DT_INST_PROP(inst, clock_frequency))), \
		.config_func = spi_dw_irq_config_##inst, \
		.serial_target = DT_INST_PROP(inst, serial_target), \
		.fifo_depth = DT_INST_PROP(inst, fifo_depth), \
		.max_xfer_size = DT_INST_PROP(inst, max_xfer_size), \
		IF_ENABLED(CONFIG_PINCTRL, (.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst),)) \
		COND_CODE_1(DT_INST_PROP(inst, aux_reg), \
			(.read_func = aux_reg_read, \
			 .write_func = aux_reg_write, \
			 .set_bit_func = aux_reg_set_bit, \
			 .clear_bit_func = aux_reg_clear_bit, \
			 .test_bit_func = aux_reg_test_bit,), \
			(.read_func = reg_read, \
			 .write_func = reg_write, \
			 .set_bit_func = reg_set_bit, \
			 .clear_bit_func = reg_clear_bit, \
			 .test_bit_func = reg_test_bit,)) \
	}; \
	SPI_DEVICE_DT_INST_DEFINE(inst, \
				  spi_dw_init, \
				  NULL, \
				  &spi_dw_data_##inst, \
				  &spi_dw_config_##inst, \
				  POST_KERNEL, \
				  CONFIG_SPI_INIT_PRIORITY, \
				  &dw_spi_api);

DT_INST_FOREACH_STATUS_OKAY(SPI_DW_INIT)