/*
 * Copyright (c) 2022 Andes Technology Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "spi_andes_atcspi200.h"

#include <zephyr/irq.h>

#define DT_DRV_COMPAT andestech_atcspi200

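/* Per-instance hook, generated by SPI_INIT() below, that connects the SPI IRQ. */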
typedef void (*atcspi200_cfg_func_t)(void);

#ifdef CONFIG_ANDES_SPI_DMA_MODE

#define ANDES_SPI_DMA_ERROR_FLAG	0x01
#define ANDES_SPI_DMA_RX_DONE_FLAG	0x02
#define ANDES_SPI_DMA_TX_DONE_FLAG	0x04
#define ANDES_SPI_DMA_DONE_FLAG		\
	(ANDES_SPI_DMA_RX_DONE_FLAG | ANDES_SPI_DMA_TX_DONE_FLAG)

struct stream {
	const struct device *dma_dev;
	uint32_t channel;
	uint32_t block_idx;
	struct dma_config dma_cfg;
	struct dma_block_config dma_blk_cfg;
	struct dma_block_config chain_block[MAX_CHAIN_SIZE];
	uint8_t priority;
	bool src_addr_increment;
	bool dst_addr_increment;
};
#endif

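/* Per-instance runtime state; chunk_len is the length of the transfer currently in flight. */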
struct spi_atcspi200_data {
	struct spi_context ctx;
	uint32_t tx_fifo_size;
	uint32_t rx_fifo_size;
	int tx_cnt;
	size_t chunk_len;
	bool busy;
#ifdef CONFIG_ANDES_SPI_DMA_MODE
	struct stream dma_rx;
	struct stream dma_tx;
#endif
};

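/* Per-instance constant configuration from devicetree; f_sys is the controller clock (clock-frequency) in Hz. */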
struct spi_atcspi200_cfg {
	atcspi200_cfg_func_t cfg_func;
	uint32_t base;
	uint32_t irq_num;
	uint32_t f_sys;
	bool xip;
};

/* API Functions */
static int spi_config(const struct device *dev,
		      const struct spi_config *config)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	uint32_t sclk_div, data_len;

	/* Set the divisor for SPI interface sclk */
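	/* The resulting SCLK is roughly f_sys / (2 * (sclk_div + 1)). */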
	sclk_div = (cfg->f_sys / (config->frequency << 1)) - 1;
	sys_clear_bits(SPI_TIMIN(cfg->base), TIMIN_SCLK_DIV_MSK);
	sys_set_bits(SPI_TIMIN(cfg->base), sclk_div);

	/* Set Master mode */
	sys_clear_bits(SPI_TFMAT(cfg->base), TFMAT_SLVMODE_MSK);

	/* Disable data merge mode */
	sys_clear_bits(SPI_TFMAT(cfg->base), TFMAT_DATA_MERGE_MSK);

	/* Set data length */
	data_len = SPI_WORD_SIZE_GET(config->operation) - 1;
	sys_clear_bits(SPI_TFMAT(cfg->base), TFMAT_DATA_LEN_MSK);
	sys_set_bits(SPI_TFMAT(cfg->base), (data_len << TFMAT_DATA_LEN_OFFSET));

	/* Set SPI frame format */
	if (config->operation & SPI_MODE_CPHA) {
		sys_set_bits(SPI_TFMAT(cfg->base), TFMAT_CPHA_MSK);
	} else {
		sys_clear_bits(SPI_TFMAT(cfg->base), TFMAT_CPHA_MSK);
	}

	if (config->operation & SPI_MODE_CPOL) {
		sys_set_bits(SPI_TFMAT(cfg->base), TFMAT_CPOL_MSK);
	} else {
		sys_clear_bits(SPI_TFMAT(cfg->base), TFMAT_CPOL_MSK);
	}

	/* Set SPI bit order */
	if (config->operation & SPI_TRANSFER_LSB) {
		sys_set_bits(SPI_TFMAT(cfg->base), TFMAT_LSB_MSK);
	} else {
		sys_clear_bits(SPI_TFMAT(cfg->base), TFMAT_LSB_MSK);
	}

	/* Set TX/RX FIFO threshold */
	sys_clear_bits(SPI_CTRL(cfg->base), CTRL_TX_THRES_MSK);
	sys_clear_bits(SPI_CTRL(cfg->base), CTRL_RX_THRES_MSK);

	sys_set_bits(SPI_CTRL(cfg->base), TX_FIFO_THRESHOLD << CTRL_TX_THRES_OFFSET);
	sys_set_bits(SPI_CTRL(cfg->base), RX_FIFO_THRESHOLD << CTRL_RX_THRES_OFFSET);

	return 0;
}

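/*
 * Program the transfer control register and start an interrupt-driven (PIO)
 * transfer of data->chunk_len frames. The hardware WR/RD transfer counts are
 * programmed as (frame count - 1).
 */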
static int spi_transfer(const struct device *dev)
{
	struct spi_atcspi200_data * const data = dev->data;
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	struct spi_context *ctx = &data->ctx;
	uint32_t data_len, tctrl, int_msk;

	if (data->chunk_len != 0) {
		data_len = data->chunk_len - 1;
	} else {
		data_len = 0;
	}

	if (data_len > MAX_TRANSFER_CNT) {
		return -EINVAL;
	}

	data->tx_cnt = 0;

	if (!spi_context_rx_on(ctx)) {
		tctrl = (TRNS_MODE_WRITE_ONLY << TCTRL_TRNS_MODE_OFFSET) |
			(data_len << TCTRL_WR_TCNT_OFFSET);
		int_msk = IEN_TX_FIFO_MSK | IEN_END_MSK;
	} else if (!spi_context_tx_on(ctx)) {
		tctrl = (TRNS_MODE_READ_ONLY << TCTRL_TRNS_MODE_OFFSET) |
			(data_len << TCTRL_RD_TCNT_OFFSET);
		int_msk = IEN_RX_FIFO_MSK | IEN_END_MSK;
	} else {
		tctrl = (TRNS_MODE_WRITE_READ << TCTRL_TRNS_MODE_OFFSET) |
			(data_len << TCTRL_WR_TCNT_OFFSET) |
			(data_len << TCTRL_RD_TCNT_OFFSET);
		int_msk = IEN_TX_FIFO_MSK |
			  IEN_RX_FIFO_MSK |
			  IEN_END_MSK;
	}

	sys_write32(tctrl, SPI_TCTRL(cfg->base));

	/* Enable TX/RX FIFO interrupts */
	sys_write32(int_msk, SPI_INTEN(cfg->base));

	/* Start transferring */
	sys_write32(0, SPI_CMD(cfg->base));

	return 0;
}


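/*
 * Validate the requested spi_config and apply it: only master mode, single
 * data line and non-loopback operation are supported by this driver.
 */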
static int configure(const struct device *dev,
		     const struct spi_config *config)
{
	struct spi_atcspi200_data * const data = dev->data;
	struct spi_context *ctx = &(data->ctx);

	if (spi_context_configured(ctx, config)) {
		/* Already configured. No need to do it again. */
		return 0;
	}

	if (SPI_OP_MODE_GET(config->operation) != SPI_OP_MODE_MASTER) {
		LOG_ERR("Slave mode is not supported on %s",
			dev->name);
		return -EINVAL;
	}

	if (config->operation & SPI_MODE_LOOP) {
		LOG_ERR("Loopback mode is not supported");
		return -EINVAL;
	}

	if ((config->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
		LOG_ERR("Only single line mode is supported");
		return -EINVAL;
	}

	ctx->config = config;

	/* SPI configuration */
	spi_config(dev, config);

	return 0;
}


#ifdef CONFIG_ANDES_SPI_DMA_MODE

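/*
 * DMA mode: each spi_buf is mapped to one DMA block. When the DMA controller
 * supports chained transfers, all blocks are linked up front; otherwise the
 * completion callbacks below reload the channel with the next buffer.
 */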
static int spi_dma_tx_load(const struct device *dev);
static int spi_dma_rx_load(const struct device *dev);

static inline void spi_tx_dma_enable(const struct device *dev)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	/* Enable TX DMA */
	sys_set_bits(SPI_CTRL(cfg->base), CTRL_TX_DMA_EN_MSK);
}

static inline void spi_tx_dma_disable(const struct device *dev)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	/* Disable TX DMA */
	sys_clear_bits(SPI_CTRL(cfg->base), CTRL_TX_DMA_EN_MSK);
}

static inline void spi_rx_dma_enable(const struct device *dev)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	/* Enable RX DMA */
	sys_set_bits(SPI_CTRL(cfg->base), CTRL_RX_DMA_EN_MSK);
}

static inline void spi_rx_dma_disable(const struct device *dev)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	/* Disable RX DMA */
	sys_clear_bits(SPI_CTRL(cfg->base), CTRL_RX_DMA_EN_MSK);
}

static int spi_dma_move_buffers(const struct device *dev)
{
	struct spi_atcspi200_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	uint32_t error = 0;

	data->dma_rx.dma_blk_cfg.next_block = NULL;
	data->dma_tx.dma_blk_cfg.next_block = NULL;

	if (spi_context_tx_on(ctx)) {
		error = spi_dma_tx_load(dev);
		if (error != 0) {
			return error;
		}
	}

	if (spi_context_rx_on(ctx)) {
		error = spi_dma_rx_load(dev);
		if (error != 0) {
			return error;
		}
	}

	return 0;
}

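/*
 * DMA completion callbacks used in non-chaining mode: stop the finished
 * channel and, if another spi_buf is pending, load it and restart the channel.
 */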
static inline void dma_rx_callback(const struct device *dev, void *user_data,
				   uint32_t channel, int status)
{
	const struct device *spi_dev = (struct device *)user_data;
	struct spi_atcspi200_data *data = spi_dev->data;
	struct spi_context *ctx = &data->ctx;
	int error;

	dma_stop(data->dma_rx.dma_dev, data->dma_rx.channel);
	spi_rx_dma_disable(spi_dev);

	if (spi_context_rx_on(ctx)) {
		if (spi_dma_rx_load(spi_dev) != 0) {
			return;
		}
		spi_rx_dma_enable(spi_dev);
		error = dma_start(data->dma_rx.dma_dev, data->dma_rx.channel);
		__ASSERT(error == 0, "dma_start failed in rx callback");
	}
}

static inline void dma_tx_callback(const struct device *dev, void *user_data,
				   uint32_t channel, int status)
{
	const struct device *spi_dev = (struct device *)user_data;
	struct spi_atcspi200_data *data = spi_dev->data;
	struct spi_context *ctx = &data->ctx;
	int error;

	dma_stop(data->dma_tx.dma_dev, data->dma_tx.channel);
	spi_tx_dma_disable(spi_dev);

	if (spi_context_tx_on(ctx)) {
		if (spi_dma_tx_load(spi_dev) != 0) {
			return;
		}
		spi_tx_dma_enable(spi_dev);
		error = dma_start(data->dma_tx.dma_dev, data->dma_tx.channel);
		__ASSERT(error == 0, "dma_start failed in tx callback");
	}
}

/*
 * Dummy word transmitted as NOP when the TX buffer is NULL, and used as a
 * dummy sink when the RX buffer is NULL.
 */
uint32_t dummy_rx_tx_buffer;

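/*
 * Build the TX DMA block (or block chain) for the current transfer: memory is
 * the source and the SPI data register is the destination.
 */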
static int spi_dma_tx_load(const struct device *dev)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	struct spi_atcspi200_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	int remain_len, ret, dfs;

	/* prepare the block for this TX DMA channel */
	memset(&data->dma_tx.dma_blk_cfg, 0, sizeof(struct dma_block_config));

	if (ctx->current_tx->len > data->chunk_len) {
		data->dma_tx.dma_blk_cfg.block_size = data->chunk_len /
					data->dma_tx.dma_cfg.dest_data_size;
	} else {
		data->dma_tx.dma_blk_cfg.block_size = ctx->current_tx->len /
					data->dma_tx.dma_cfg.dest_data_size;
	}

	/* tx direction has memory as source and periph as dest. */
	if (ctx->current_tx->buf == NULL) {
		dummy_rx_tx_buffer = 0;
		/* if tx buf is null, send NOP frames on the line. */
		data->dma_tx.dma_blk_cfg.source_address = (uintptr_t)&dummy_rx_tx_buffer;
		data->dma_tx.dma_blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	} else {
		data->dma_tx.dma_blk_cfg.source_address = (uintptr_t)ctx->current_tx->buf;
		if (data->dma_tx.src_addr_increment) {
			data->dma_tx.dma_blk_cfg.source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		} else {
			data->dma_tx.dma_blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		}
	}

	dfs = SPI_WORD_SIZE_GET(ctx->config->operation) >> 3;
	remain_len = data->chunk_len - ctx->current_tx->len;
	spi_context_update_tx(ctx, dfs, ctx->current_tx->len);

	data->dma_tx.dma_blk_cfg.dest_address = (uint32_t)SPI_DATA(cfg->base);
	/* FIFO mode is not used here. */
	if (data->dma_tx.dst_addr_increment) {
		data->dma_tx.dma_blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
	} else {
		data->dma_tx.dma_blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	}

	/* direction is given by the DT */
	data->dma_tx.dma_cfg.head_block = &data->dma_tx.dma_blk_cfg;
	data->dma_tx.dma_cfg.head_block->next_block = NULL;
	/* give the client dev as arg, as the callback comes from the dma */
	data->dma_tx.dma_cfg.user_data = (void *)dev;

	if (data->dma_tx.dma_cfg.source_chaining_en) {
		data->dma_tx.dma_cfg.block_count = ctx->tx_count;
		data->dma_tx.dma_cfg.dma_callback = NULL;
		data->dma_tx.block_idx = 0;
		struct dma_block_config *blk_cfg = &data->dma_tx.dma_blk_cfg;
		const struct spi_buf *current_tx = ctx->current_tx;

		while (remain_len > 0) {
			struct dma_block_config *next_blk_cfg;

			next_blk_cfg = &data->dma_tx.chain_block[data->dma_tx.block_idx];
			data->dma_tx.block_idx += 1;

			blk_cfg->next_block = next_blk_cfg;
			current_tx = ctx->current_tx;

			next_blk_cfg->block_size = current_tx->len /
					data->dma_tx.dma_cfg.dest_data_size;

			/* tx direction has memory as source and periph as dest. */
			if (current_tx->buf == NULL) {
				dummy_rx_tx_buffer = 0;
				/* if tx buf is null, send NOP frames on the line. */
				next_blk_cfg->source_address = (uintptr_t)&dummy_rx_tx_buffer;
				next_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			} else {
				next_blk_cfg->source_address = (uintptr_t)current_tx->buf;
				if (data->dma_tx.src_addr_increment) {
					next_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
				} else {
					next_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
				}
			}

			next_blk_cfg->dest_address = (uint32_t)SPI_DATA(cfg->base);
			/* FIFO mode is not used here. */
			if (data->dma_tx.dst_addr_increment) {
				next_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
			} else {
				next_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			}

			blk_cfg = next_blk_cfg;
			next_blk_cfg->next_block = NULL;
			remain_len -= ctx->current_tx->len;
			spi_context_update_tx(ctx, dfs, ctx->current_tx->len);
		}

	} else {
		data->dma_tx.dma_blk_cfg.next_block = NULL;
		data->dma_tx.dma_cfg.block_count = 1;
		data->dma_tx.dma_cfg.dma_callback = dma_tx_callback;
	}

	/* pass our client origin to the dma: data->dma_tx.channel */
	ret = dma_config(data->dma_tx.dma_dev, data->dma_tx.channel,
			 &data->dma_tx.dma_cfg);
	/* the channel is the actual stream from 0 */
	if (ret != 0) {
		data->dma_tx.block_idx = 0;
		data->dma_tx.dma_blk_cfg.next_block = NULL;
		return ret;
	}

	return 0;
}

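/*
 * Build the RX DMA block (or block chain), mirroring spi_dma_tx_load() with
 * the SPI data register as source and memory as destination.
 */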
static int spi_dma_rx_load(const struct device *dev)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	struct spi_atcspi200_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	int remain_len, ret, dfs;

	/* prepare the block for this RX DMA channel */
	memset(&data->dma_rx.dma_blk_cfg, 0, sizeof(struct dma_block_config));

	if (ctx->current_rx->len > data->chunk_len) {
		data->dma_rx.dma_blk_cfg.block_size = data->chunk_len /
					data->dma_rx.dma_cfg.dest_data_size;
	} else {
		data->dma_rx.dma_blk_cfg.block_size = ctx->current_rx->len /
					data->dma_rx.dma_cfg.dest_data_size;
	}

	/* rx direction has periph as source and mem as dest. */
	if (ctx->current_rx->buf == NULL) {
		/* if rx buf is null, write the data to a dummy address. */
		data->dma_rx.dma_blk_cfg.dest_address = (uintptr_t)&dummy_rx_tx_buffer;
		data->dma_rx.dma_blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	} else {
		data->dma_rx.dma_blk_cfg.dest_address = (uintptr_t)ctx->current_rx->buf;
		if (data->dma_rx.dst_addr_increment) {
			data->dma_rx.dma_blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		} else {
			data->dma_rx.dma_blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		}
	}

	dfs = SPI_WORD_SIZE_GET(ctx->config->operation) >> 3;
	remain_len = data->chunk_len - ctx->current_rx->len;
	spi_context_update_rx(ctx, dfs, ctx->current_rx->len);

	data->dma_rx.dma_blk_cfg.source_address = (uint32_t)SPI_DATA(cfg->base);

	if (data->dma_rx.src_addr_increment) {
		data->dma_rx.dma_blk_cfg.source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
	} else {
		data->dma_rx.dma_blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	}

	data->dma_rx.dma_cfg.head_block = &data->dma_rx.dma_blk_cfg;
	data->dma_rx.dma_cfg.head_block->next_block = NULL;
	data->dma_rx.dma_cfg.user_data = (void *)dev;

	if (data->dma_rx.dma_cfg.source_chaining_en) {
		data->dma_rx.dma_cfg.block_count = ctx->rx_count;
		data->dma_rx.dma_cfg.dma_callback = NULL;
		data->dma_rx.block_idx = 0;
		struct dma_block_config *blk_cfg = &data->dma_rx.dma_blk_cfg;
		const struct spi_buf *current_rx = ctx->current_rx;

		while (remain_len > 0) {
			struct dma_block_config *next_blk_cfg;

			next_blk_cfg = &data->dma_rx.chain_block[data->dma_rx.block_idx];
			data->dma_rx.block_idx += 1;

			blk_cfg->next_block = next_blk_cfg;
			current_rx = ctx->current_rx;

			next_blk_cfg->block_size = current_rx->len /
					data->dma_rx.dma_cfg.dest_data_size;

			/* rx direction has periph as source and mem as dest. */
			if (current_rx->buf == NULL) {
				/* if rx buf is null, write the data to a dummy address. */
				next_blk_cfg->dest_address = (uintptr_t)&dummy_rx_tx_buffer;
				next_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			} else {
				next_blk_cfg->dest_address = (uintptr_t)current_rx->buf;
				if (data->dma_rx.dst_addr_increment) {
					next_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
				} else {
					next_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
				}
			}

			next_blk_cfg->source_address = (uint32_t)SPI_DATA(cfg->base);

			if (data->dma_rx.src_addr_increment) {
				next_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
			} else {
				next_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			}

			blk_cfg = next_blk_cfg;
			next_blk_cfg->next_block = NULL;
			remain_len -= ctx->current_rx->len;
			spi_context_update_rx(ctx, dfs, ctx->current_rx->len);
		}
	} else {
		data->dma_rx.dma_blk_cfg.next_block = NULL;
		data->dma_rx.dma_cfg.block_count = 1;
		data->dma_rx.dma_cfg.dma_callback = dma_rx_callback;
	}

	/* pass our client origin to the dma: data->dma_rx.channel */
	ret = dma_config(data->dma_rx.dma_dev, data->dma_rx.channel,
			 &data->dma_rx.dma_cfg);
	/* the channel is the actual stream from 0 */
	if (ret != 0) {
		data->dma_rx.block_idx = 0;
		data->dma_rx.dma_blk_cfg.next_block = NULL;
		return ret;
	}

	return 0;
}

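/*
 * DMA-driven transfer: program TCTRL for the transfer length, enable only the
 * END interrupt, load the DMA blocks, then start the SPI command followed by
 * the required DMA channel(s).
 */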
static int spi_transfer_dma(const struct device *dev)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	struct spi_atcspi200_data * const data = dev->data;
	struct spi_context *ctx = &data->ctx;
	uint32_t data_len, tctrl, dma_rx_enable, dma_tx_enable;
	int error = 0;

	data_len = data->chunk_len - 1;
	if (data_len > MAX_TRANSFER_CNT) {
		return -EINVAL;
	}

	if (!spi_context_rx_on(ctx)) {
		tctrl = (TRNS_MODE_WRITE_ONLY << TCTRL_TRNS_MODE_OFFSET) |
			(data_len << TCTRL_WR_TCNT_OFFSET);
		dma_rx_enable = 0;
		dma_tx_enable = 1;
	} else if (!spi_context_tx_on(ctx)) {
		tctrl = (TRNS_MODE_READ_ONLY << TCTRL_TRNS_MODE_OFFSET) |
			(data_len << TCTRL_RD_TCNT_OFFSET);
		dma_rx_enable = 1;
		dma_tx_enable = 0;
	} else {
		tctrl = (TRNS_MODE_WRITE_READ << TCTRL_TRNS_MODE_OFFSET) |
			(data_len << TCTRL_WR_TCNT_OFFSET) |
			(data_len << TCTRL_RD_TCNT_OFFSET);
		dma_rx_enable = 1;
		dma_tx_enable = 1;
	}

	sys_write32(tctrl, SPI_TCTRL(cfg->base));

	/* Set sclk_div to zero */
	sys_clear_bits(SPI_TIMIN(cfg->base), 0xff);

	/* Enable the END interrupt */
	sys_write32(IEN_END_MSK, SPI_INTEN(cfg->base));

	/* Set up the DMA configuration */
	error = spi_dma_move_buffers(dev);
	if (error != 0) {
		return error;
	}

	/* Start transferring */
	sys_write32(0, SPI_CMD(cfg->base));

	if (dma_rx_enable) {
		spi_rx_dma_enable(dev);
		error = dma_start(data->dma_rx.dma_dev, data->dma_rx.channel);
		if (error != 0) {
			return error;
		}
	}
	if (dma_tx_enable) {
		spi_tx_dma_enable(dev);
		error = dma_start(data->dma_tx.dma_dev, data->dma_tx.channel);
		if (error != 0) {
			return error;
		}
	}

	return 0;
}
#endif

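/*
 * Common transceive path: configure the controller, set up the buffers and
 * chip select, derive the transfer length (chunk_len) from the buffer sets,
 * then run either the DMA or the interrupt-driven transfer and wait for
 * completion.
 */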
static int transceive(const struct device *dev,
		      const struct spi_config *config,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	struct spi_atcspi200_data * const data = dev->data;
	struct spi_context *ctx = &data->ctx;
	int error, dfs;
	size_t chunk_len;

	spi_context_lock(ctx, asynchronous, cb, userdata, config);
	error = configure(dev, config);
	if (error == 0) {
		data->busy = true;

		dfs = SPI_WORD_SIZE_GET(ctx->config->operation) >> 3;
		spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, dfs);
		spi_context_cs_control(ctx, true);

		sys_set_bits(SPI_CTRL(cfg->base), CTRL_TX_FIFO_RST_MSK);
		sys_set_bits(SPI_CTRL(cfg->base), CTRL_RX_FIFO_RST_MSK);

		if (!spi_context_rx_on(ctx)) {
			chunk_len = spi_context_total_tx_len(ctx);
		} else if (!spi_context_tx_on(ctx)) {
			chunk_len = spi_context_total_rx_len(ctx);
		} else {
			size_t rx_len = spi_context_total_rx_len(ctx);
			size_t tx_len = spi_context_total_tx_len(ctx);

			chunk_len = MIN(rx_len, tx_len);
		}

		data->chunk_len = chunk_len;

#ifdef CONFIG_ANDES_SPI_DMA_MODE
		if ((data->dma_tx.dma_dev != NULL) && (data->dma_rx.dma_dev != NULL)) {
			error = spi_transfer_dma(dev);
			if (error != 0) {
				spi_context_cs_control(ctx, false);
				goto out;
			}
		} else {
#endif /* CONFIG_ANDES_SPI_DMA_MODE */

			error = spi_transfer(dev);
			if (error != 0) {
				spi_context_cs_control(ctx, false);
				goto out;
			}

#ifdef CONFIG_ANDES_SPI_DMA_MODE
		}
#endif /* CONFIG_ANDES_SPI_DMA_MODE */
		error = spi_context_wait_for_completion(ctx);
		spi_context_cs_control(ctx, false);
	}
out:
	spi_context_release(ctx, error);

	return error;
}

int spi_atcspi200_transceive(const struct device *dev,
			     const struct spi_config *config,
			     const struct spi_buf_set *tx_bufs,
			     const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
int spi_atcspi200_transceive_async(const struct device *dev,
				   const struct spi_config *config,
				   const struct spi_buf_set *tx_bufs,
				   const struct spi_buf_set *rx_bufs,
				   spi_callback_t cb,
				   void *userdata)
{
	return transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif

int spi_atcspi200_release(const struct device *dev,
			  const struct spi_config *config)
{
	struct spi_atcspi200_data * const data = dev->data;

	if (data->busy) {
		return -EBUSY;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

int spi_atcspi200_init(const struct device *dev)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	struct spi_atcspi200_data * const data = dev->data;
	int err = 0;

	/* we should not configure the device we are running on */
	if (cfg->xip) {
		return -EINVAL;
	}

	spi_context_unlock_unconditionally(&data->ctx);

#ifdef CONFIG_ANDES_SPI_DMA_MODE
	if (!data->dma_tx.dma_dev) {
		LOG_ERR("DMA device not found");
		return -ENODEV;
	}

	if (!data->dma_rx.dma_dev) {
		LOG_ERR("DMA device not found");
		return -ENODEV;
	}
#endif

	/* Get the TX/RX FIFO size of this device */
	data->tx_fifo_size = TX_FIFO_SIZE(cfg->base);
	data->rx_fifo_size = RX_FIFO_SIZE(cfg->base);

	cfg->cfg_func();

	irq_enable(cfg->irq_num);

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	return 0;
}

static DEVICE_API(spi, spi_atcspi200_api) = {
	.transceive = spi_atcspi200_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_atcspi200_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = spi_atcspi200_release
};

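/*
 * Illustrative usage sketch (not part of this driver): applications reach
 * this controller through the generic Zephyr SPI API. The node label below
 * is hypothetical.
 *
 *	const struct device *spi = DEVICE_DT_GET(DT_NODELABEL(spi0));
 *	struct spi_config cfg = {
 *		.frequency = 1000000U,
 *		.operation = SPI_OP_MODE_MASTER | SPI_WORD_SET(8),
 *	};
 *	struct spi_buf buf = { .buf = tx_data, .len = sizeof(tx_data) };
 *	struct spi_buf_set tx = { .buffers = &buf, .count = 1 };
 *	int ret = spi_transceive(spi, &cfg, &tx, NULL);
 */
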
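/*
 * Interrupt handler: refill the TX FIFO, drain the RX FIFO, and on the END
 * interrupt disable all SPI interrupts and complete the transfer.
 */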
static void spi_atcspi200_irq_handler(void *arg)
{
	const struct device * const dev = (const struct device *) arg;
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	struct spi_atcspi200_data * const data = dev->data;
	struct spi_context *ctx = &data->ctx;
	uint32_t rx_data, cur_tx_fifo_num, cur_rx_fifo_num;
	uint32_t i, dfs, intr_status, spi_status;
	uint32_t tx_num = 0, tx_data = 0;
	int error = 0;

	intr_status = sys_read32(SPI_INTST(cfg->base));
	dfs = SPI_WORD_SIZE_GET(ctx->config->operation) >> 3;

	if ((intr_status & INTST_TX_FIFO_INT_MSK) &&
	    !(intr_status & INTST_END_INT_MSK)) {

		spi_status = sys_read32(SPI_STAT(cfg->base));
		cur_tx_fifo_num = GET_TX_NUM(cfg->base);

		tx_num = data->tx_fifo_size - cur_tx_fifo_num;

		for (i = tx_num; i > 0; i--) {

			if (data->tx_cnt >= data->chunk_len) {
				/* Have already sent a chunk of data, so stop
				 * sending data!
				 */
				sys_clear_bits(SPI_INTEN(cfg->base), IEN_TX_FIFO_MSK);
				break;
			}

			if (spi_context_tx_buf_on(ctx)) {

				switch (dfs) {
				case 1:
					tx_data = *ctx->tx_buf;
					break;
				case 2:
					tx_data = *(uint16_t *)ctx->tx_buf;
					break;
				}

			} else if (spi_context_tx_on(ctx)) {
				tx_data = 0;
			} else {
				sys_clear_bits(SPI_INTEN(cfg->base), IEN_TX_FIFO_MSK);
				break;
			}

			sys_write32(tx_data, SPI_DATA(cfg->base));

			spi_context_update_tx(ctx, dfs, 1);

			data->tx_cnt++;
		}
		sys_write32(INTST_TX_FIFO_INT_MSK, SPI_INTST(cfg->base));

	}

	if (intr_status & INTST_RX_FIFO_INT_MSK) {
		cur_rx_fifo_num = GET_RX_NUM(cfg->base);

		for (i = cur_rx_fifo_num; i > 0; i--) {

			rx_data = sys_read32(SPI_DATA(cfg->base));

			if (spi_context_rx_buf_on(ctx)) {

				switch (dfs) {
				case 1:
					*ctx->rx_buf = rx_data;
					break;
				case 2:
					*(uint16_t *)ctx->rx_buf = rx_data;
					break;
				}

			} else if (!spi_context_rx_on(ctx)) {
				sys_clear_bits(SPI_INTEN(cfg->base), IEN_RX_FIFO_MSK);
			}

			spi_context_update_rx(ctx, dfs, 1);
		}
		sys_write32(INTST_RX_FIFO_INT_MSK, SPI_INTST(cfg->base));
	}

	if (intr_status & INTST_END_INT_MSK) {

		/* Clear end interrupt */
		sys_write32(INTST_END_INT_MSK, SPI_INTST(cfg->base));

		/* Disable all SPI interrupts */
		sys_write32(0, SPI_INTEN(cfg->base));

#ifdef CONFIG_ANDES_SPI_DMA_MODE
		if ((data->dma_tx.dma_dev != NULL) && data->dma_tx.dma_cfg.source_chaining_en) {

			spi_tx_dma_disable(dev);
			dma_stop(data->dma_tx.dma_dev, data->dma_tx.channel);
			data->dma_tx.block_idx = 0;
			data->dma_tx.dma_blk_cfg.next_block = NULL;
		}

		if ((data->dma_rx.dma_dev != NULL) && data->dma_rx.dma_cfg.source_chaining_en) {

			spi_rx_dma_disable(dev);
			dma_stop(data->dma_rx.dma_dev, data->dma_rx.channel);
			data->dma_rx.block_idx = 0;
			data->dma_rx.dma_blk_cfg.next_block = NULL;
		}
#endif /* CONFIG_ANDES_SPI_DMA_MODE */

		data->busy = false;

		spi_context_complete(ctx, dev, error);

	}
}

#if CONFIG_ANDES_SPI_DMA_MODE

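/*
 * Decode the "channel_config" cell of the devicetree "dmas" specifier:
 * bits [1:0]  transfer direction
 * bit  2      peripheral address increment
 * bit  3      memory address increment
 * bits [6:4]  log2 of the peripheral data width in bytes
 * bits [9:7]  log2 of the memory data width in bytes
 * bit  10     channel priority
 */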
#define ANDES_DMA_CONFIG_DIRECTION(config)		(FIELD_GET(GENMASK(1, 0), config))
#define ANDES_DMA_CONFIG_PERIPHERAL_ADDR_INC(config)	(FIELD_GET(BIT(2), config))
#define ANDES_DMA_CONFIG_MEMORY_ADDR_INC(config)	(FIELD_GET(BIT(3), config))
#define ANDES_DMA_CONFIG_PERIPHERAL_DATA_SIZE(config)	(1 << (FIELD_GET(GENMASK(6, 4), config)))
#define ANDES_DMA_CONFIG_MEMORY_DATA_SIZE(config)	(1 << (FIELD_GET(GENMASK(9, 7), config)))
#define ANDES_DMA_CONFIG_PRIORITY(config)		(FIELD_GET(BIT(10), config))

#define DMA_CHANNEL_CONFIG(id, dir)					\
	DT_INST_DMAS_CELL_BY_NAME(id, dir, channel_config)

#define SPI_DMA_CHANNEL_INIT(index, dir, dir_cap, src_dev, dest_dev)	\
	.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(index, dir)),\
	.channel =							\
		DT_INST_DMAS_CELL_BY_NAME(index, dir, channel),		\
	.dma_cfg = {							\
		.dma_slot =						\
			DT_INST_DMAS_CELL_BY_NAME(index, dir, slot),	\
		.channel_direction = ANDES_DMA_CONFIG_DIRECTION(	\
			DMA_CHANNEL_CONFIG(index, dir)),		\
		.complete_callback_en = 0,				\
		.error_callback_dis = 0,				\
		.source_data_size =					\
			ANDES_DMA_CONFIG_##src_dev##_DATA_SIZE(		\
				DMA_CHANNEL_CONFIG(index, dir)		\
			),						\
		.dest_data_size =					\
			ANDES_DMA_CONFIG_##dest_dev##_DATA_SIZE(	\
				DMA_CHANNEL_CONFIG(index, dir)		\
			),						\
		.source_burst_length = 1, /* SINGLE transfer */		\
		.dest_burst_length = 1, /* SINGLE transfer */		\
		.channel_priority = ANDES_DMA_CONFIG_PRIORITY(		\
			DMA_CHANNEL_CONFIG(index, dir)			\
		),							\
		.source_chaining_en = DT_PROP(DT_INST_DMAS_CTLR_BY_NAME(\
			index, dir), chain_transfer),			\
		.dest_chaining_en = DT_PROP(DT_INST_DMAS_CTLR_BY_NAME(	\
			index, dir), chain_transfer),			\
	},								\
	.src_addr_increment =						\
		ANDES_DMA_CONFIG_##src_dev##_ADDR_INC(			\
			DMA_CHANNEL_CONFIG(index, dir)			\
		),							\
	.dst_addr_increment =						\
		ANDES_DMA_CONFIG_##dest_dev##_ADDR_INC(			\
			DMA_CHANNEL_CONFIG(index, dir)			\
		)

#define SPI_DMA_CHANNEL(id, dir, DIR, src, dest)			\
	.dma_##dir = {							\
		COND_CODE_1(DT_INST_DMAS_HAS_NAME(id, dir),		\
			(SPI_DMA_CHANNEL_INIT(id, dir, DIR, src, dest)),\
			(NULL))						\
	},

#else
#define SPI_DMA_CHANNEL(id, dir, DIR, src, dest)
#endif

#define SPI_BUSY_INIT .busy = false,

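/*
 * When executing in place from SPI flash, the controller that the flash sits
 * behind must not be reconfigured; SPI_ROM_CFG_XIP() marks that instance so
 * spi_atcspi200_init() can reject it.
 */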
#if (CONFIG_XIP)
#define SPI_ROM_CFG_XIP(node_id) DT_SAME_NODE(node_id, DT_BUS(DT_CHOSEN(zephyr_flash)))
#else
#define SPI_ROM_CFG_XIP(node_id) false
#endif

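/*
 * Per-instance instantiation. An illustrative devicetree fragment this macro
 * consumes (addresses, interrupt cells and clock rate are hypothetical):
 *
 *	spi0: spi@f0b00000 {
 *		compatible = "andestech,atcspi200";
 *		reg = <0xf0b00000 0x1000>;
 *		interrupts = <4 1>;
 *		clock-frequency = <66000000>;
 *	};
 *
 * Optional "dmas"/"dma-names" ("tx"/"rx") properties select DMA operation
 * when CONFIG_ANDES_SPI_DMA_MODE is enabled.
 */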
#define SPI_INIT(n)							\
	static struct spi_atcspi200_data spi_atcspi200_dev_data_##n = {\
		SPI_CONTEXT_INIT_LOCK(spi_atcspi200_dev_data_##n, ctx),\
		SPI_CONTEXT_INIT_SYNC(spi_atcspi200_dev_data_##n, ctx),\
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)	\
		SPI_BUSY_INIT						\
		SPI_DMA_CHANNEL(n, rx, RX, PERIPHERAL, MEMORY)		\
		SPI_DMA_CHANNEL(n, tx, TX, MEMORY, PERIPHERAL)		\
	};								\
	static void spi_atcspi200_cfg_##n(void);			\
	static struct spi_atcspi200_cfg spi_atcspi200_dev_cfg_##n = {	\
		.cfg_func = spi_atcspi200_cfg_##n,			\
		.base = DT_INST_REG_ADDR(n),				\
		.irq_num = DT_INST_IRQN(n),				\
		.f_sys = DT_INST_PROP(n, clock_frequency),		\
		.xip = SPI_ROM_CFG_XIP(DT_DRV_INST(n)),			\
	};								\
									\
	SPI_DEVICE_DT_INST_DEFINE(n,					\
		spi_atcspi200_init,					\
		NULL,							\
		&spi_atcspi200_dev_data_##n,				\
		&spi_atcspi200_dev_cfg_##n,				\
		POST_KERNEL,						\
		CONFIG_SPI_INIT_PRIORITY,				\
		&spi_atcspi200_api);					\
									\
	static void spi_atcspi200_cfg_##n(void)				\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(n),				\
			DT_INST_IRQ(n, priority),			\
			spi_atcspi200_irq_handler,			\
			DEVICE_DT_INST_GET(n),				\
			0);						\
	};

DT_INST_FOREACH_STATUS_OKAY(SPI_INIT)