/*
 * Copyright (c) 2016, Freescale Semiconductor, Inc.
 * Copyright (c) 2017, NXP
 * Copyright (c) 2021, ATL Electronics.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT cypress_psoc6_spi

#define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_psoc6);

#include <errno.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/spi.h>
#include <soc.h>

#include "spi_context.h"

#include "cy_syslib.h"
#include "cy_sysclk.h"
#include "cy_scb_spi.h"
#include "cy_sysint.h"

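/*
 * The SCB SPI block provides up to four hardware slave-select lines and
 * data words of up to 16 bits. The driver reserves 8-bit peripheral clock
 * divider number 1 for the SCB input clock (see spi_psoc6_init()).
 */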
#define SPI_CHIP_SELECT_COUNT		4
#define SPI_MAX_DATA_WIDTH		16
#define SPI_PSOC6_CLK_DIV_NUMBER	1

struct spi_psoc6_config {
	CySCB_Type *base;
	uint32_t periph_id;
	void (*irq_config_func)(const struct device *dev);
	const struct pinctrl_dev_config *pcfg;
};

struct spi_psoc6_transfer {
	uint8_t *txData;
	uint8_t *rxData;
	size_t dataSize;
};

struct spi_psoc6_data {
	struct spi_context ctx;
	struct cy_stc_scb_spi_config cfg;
	struct spi_psoc6_transfer xfer;
};

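/*
 * Prepare the next chunk of the current spi_context and load it into the
 * TX FIFO. Unequal tx/rx lengths are split into several back-to-back
 * transfers with chip select kept asserted. When both lengths reach zero
 * the context is completed and chip select is released.
 */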
static void spi_psoc6_transfer_next_packet(const struct device *dev)
{
	const struct spi_psoc6_config *config = dev->config;
	struct spi_psoc6_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	struct spi_psoc6_transfer *xfer = &data->xfer;
	uint32_t count;

	LOG_DBG("TX L: %d, RX L: %d", ctx->tx_len, ctx->rx_len);

	if ((ctx->tx_len == 0U) && (ctx->rx_len == 0U)) {
		/* nothing left to rx or tx, we're done! */
		xfer->dataSize = 0U;

		spi_context_cs_control(ctx, false);
		spi_context_complete(ctx, dev, 0U);
		return;
	}

	if (ctx->tx_len == 0U) {
		/* rx only, nothing to tx */
		xfer->txData = NULL;
		xfer->rxData = ctx->rx_buf;
		xfer->dataSize = ctx->rx_len;
	} else if (ctx->rx_len == 0U) {
		/* tx only, nothing to rx */
		xfer->txData = (uint8_t *) ctx->tx_buf;
		xfer->rxData = NULL;
		xfer->dataSize = ctx->tx_len;
	} else if (ctx->tx_len == ctx->rx_len) {
		/* rx and tx are the same length */
		xfer->txData = (uint8_t *) ctx->tx_buf;
		xfer->rxData = ctx->rx_buf;
		xfer->dataSize = ctx->tx_len;
	} else if (ctx->tx_len > ctx->rx_len) {
		/* Break up the tx into multiple transfers so we don't have to
		 * rx into a longer intermediate buffer. Leave chip select
		 * active between transfers.
		 */
		xfer->txData = (uint8_t *) ctx->tx_buf;
		xfer->rxData = ctx->rx_buf;
		xfer->dataSize = ctx->rx_len;
	} else {
		/* Break up the rx into multiple transfers so we don't have to
		 * tx from a longer intermediate buffer. Leave chip select
		 * active between transfers.
		 */
		xfer->txData = (uint8_t *) ctx->tx_buf;
		xfer->rxData = ctx->rx_buf;
		xfer->dataSize = ctx->tx_len;
	}

	if (xfer->txData != NULL) {
		if (Cy_SCB_SPI_WriteArray(config->base, xfer->txData,
					  xfer->dataSize) != xfer->dataSize) {
			goto err;
		}
	} else {
		/* The TX FIFO must still be filled with dummy bytes to clock
		 * the read. This keeps the logic simple and saves stack.
		 * Use 0 as the dummy value.
		 */
		for (count = 0U; count < xfer->dataSize; count++) {
			if (Cy_SCB_SPI_Write(config->base, 0U) == 0U) {
				goto err;
			}
		}
	}

	LOG_DBG("TRX L: %d", xfer->dataSize);

	return;
err:
	/* no FIFO available to run the transfer */
	xfer->dataSize = 0U;

	spi_context_cs_control(ctx, false);
	spi_context_complete(ctx, dev, -ENOMEM);
}

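/*
 * SPI_DONE interrupt handler (asynchronous path): acknowledge the
 * interrupt, drain the RX FIFO into the current rx buffer (or flush it
 * for tx-only chunks), advance the spi_context and queue the next chunk.
 * The interrupt stays masked unless more data was loaded.
 */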
static void spi_psoc6_isr(const struct device *dev)
{
	const struct spi_psoc6_config *config = dev->config;
	struct spi_psoc6_data *data = dev->data;

	Cy_SCB_ClearMasterInterrupt(config->base,
				    CY_SCB_MASTER_INTR_SPI_DONE);

	/* extract data from RX FIFO */
	if (data->xfer.rxData != NULL) {
		Cy_SCB_SPI_ReadArray(config->base,
				     data->xfer.rxData,
				     data->xfer.dataSize);
	} else {
		Cy_SCB_ClearRxFifo(config->base);
	}

	/* Set next data block */
	spi_context_update_tx(&data->ctx, 1, data->xfer.dataSize);
	spi_context_update_rx(&data->ctx, 1, data->xfer.dataSize);

	/* Start the next block.
	 * Writing a single byte to the TX FIFO already starts a transfer, so
	 * mask all interrupt sources while the FIFO is being refilled to
	 * avoid re-entering this ISR.
	 */
	Cy_SCB_SetMasterInterruptMask(config->base, 0U);

	spi_psoc6_transfer_next_packet(dev);

	if (data->xfer.dataSize > 0U) {
		Cy_SCB_SetMasterInterruptMask(config->base,
					      CY_SCB_MASTER_INTR_SPI_DONE);
	}
}

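/*
 * Pick the smallest SCB oversample value such that bus_freq / oversample
 * does not exceed the requested frequency. The result is limited to the
 * hardware range [4, 16], so requests below bus_freq / 16 are clamped.
 */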
static uint32_t spi_psoc6_get_freqdiv(uint32_t frequency)
{
	uint32_t oversample;
	uint32_t bus_freq = 100000000UL;
	/*
	 * TODO: Get PerBusSpeed when clocks are available to PSoC-6.
	 * Currently the bus freq is fixed to 50 MHz and the max SPI clock
	 * can be 12.5 MHz.
	 */

	for (oversample = 4; oversample < 16; oversample++) {
		if ((bus_freq / oversample) <= frequency) {
			break;
		}
	}

	/* Oversample [4, 16] */
	return oversample;
}

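/*
 * Baseline PDL configuration for Motorola-mode SPI master operation.
 * Clock mode, data width, bit order and oversample are filled in later by
 * spi_psoc6_configure() from the Zephyr spi_config.
 */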
static void spi_psoc6_master_get_defaults(struct cy_stc_scb_spi_config *cfg)
{
	cfg->spiMode = CY_SCB_SPI_MASTER;
	cfg->subMode = CY_SCB_SPI_MOTOROLA;
	cfg->sclkMode = 0U;
	cfg->oversample = 0U;
	cfg->rxDataWidth = 0U;
	cfg->txDataWidth = 0U;
	cfg->enableMsbFirst = false;
	cfg->enableFreeRunSclk = false;
	cfg->enableInputFilter = false;
	cfg->enableMisoLateSample = false;
	cfg->enableTransferSeperation = false;
	cfg->ssPolarity = 0U;
	cfg->enableWakeFromSleep = false;
	cfg->rxFifoTriggerLevel = 0U;
	cfg->rxFifoIntEnableMask = 0U;
	cfg->txFifoTriggerLevel = 0U;
	cfg->txFifoIntEnableMask = 0U;
	cfg->masterSlaveIntEnableMask = 0U;
}

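/*
 * Translate the Zephyr spi_config into the PDL configuration held in the
 * driver data. Only full-duplex master mode is supported; the word size
 * may be at most SPI_MAX_DATA_WIDTH bits and the slave number selects one
 * of the SCB slave-select lines.
 */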
static int spi_psoc6_configure(const struct device *dev,
			       const struct spi_config *spi_cfg)
{
	struct spi_psoc6_data *data = dev->data;
	uint32_t word_size;

	if (spi_context_configured(&data->ctx, spi_cfg)) {
		/* This configuration is already in use */
		return 0;
	}

	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
	if (word_size > SPI_MAX_DATA_WIDTH) {
		LOG_ERR("Word size %d is greater than %d",
			word_size, SPI_MAX_DATA_WIDTH);
		return -EINVAL;
	}

	if (SPI_OP_MODE_GET(spi_cfg->operation) == SPI_OP_MODE_MASTER) {
		spi_psoc6_master_get_defaults(&data->cfg);

		if (spi_cfg->slave >= SPI_CHIP_SELECT_COUNT) {
			LOG_ERR("Slave %d exceeds the maximum of %d",
				spi_cfg->slave, SPI_CHIP_SELECT_COUNT - 1);
			return -EINVAL;
		}

		data->cfg.rxDataWidth = data->cfg.txDataWidth = word_size;

		if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) {
			if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) {
				data->cfg.sclkMode = CY_SCB_SPI_CPHA1_CPOL1;
			} else {
				data->cfg.sclkMode = CY_SCB_SPI_CPHA1_CPOL0;
			}
		} else {
			if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) {
				data->cfg.sclkMode = CY_SCB_SPI_CPHA0_CPOL1;
			} else {
				data->cfg.sclkMode = CY_SCB_SPI_CPHA0_CPOL0;
			}
		}

		data->cfg.enableMsbFirst = !(spi_cfg->operation &
					     SPI_TRANSFER_LSB);
		data->cfg.oversample = spi_psoc6_get_freqdiv(spi_cfg->frequency);

		data->ctx.config = spi_cfg;
	} else {
		/* Slave mode is not implemented yet. */
		return -ENOTSUP;
	}

	return 0;
}

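/*
 * Synchronous (polling) path: busy-wait until the SCB reports the TX
 * transfer complete, drain the RX FIFO, advance the context and start the
 * next chunk until nothing is left to transfer.
 */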
static void spi_psoc6_transceive_sync_loop(const struct device *dev)
{
	const struct spi_psoc6_config *config = dev->config;
	struct spi_psoc6_data *data = dev->data;

	while (data->xfer.dataSize > 0U) {
		while (!Cy_SCB_IsTxComplete(config->base)) {
			;
		}

		if (data->xfer.rxData != NULL) {
			Cy_SCB_SPI_ReadArray(config->base,
					     data->xfer.rxData,
					     data->xfer.dataSize);
		} else {
			Cy_SCB_ClearRxFifo(config->base);
		}

		spi_context_update_tx(&data->ctx, 1, data->xfer.dataSize);
		spi_context_update_rx(&data->ctx, 1, data->xfer.dataSize);

		spi_psoc6_transfer_next_packet(dev);
	}
}

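/*
 * Common transceive entry point. The SCB is re-initialized and enabled
 * for every call, chip select is asserted and the first chunk is loaded.
 * The asynchronous path then arms the SPI_DONE interrupt, while the
 * synchronous path polls until completion.
 */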
static int spi_psoc6_transceive(const struct device *dev,
				const struct spi_config *spi_cfg,
				const struct spi_buf_set *tx_bufs,
				const struct spi_buf_set *rx_bufs,
				bool asynchronous,
				spi_callback_t cb,
				void *userdata)
{
	const struct spi_psoc6_config *config = dev->config;
	struct spi_psoc6_data *data = dev->data;
	int ret;

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);

	LOG_DBG("\n\n");

	ret = spi_psoc6_configure(dev, spi_cfg);
	if (ret) {
		goto out;
	}

	Cy_SCB_SPI_Init(config->base, &data->cfg, NULL);
	Cy_SCB_SPI_SetActiveSlaveSelect(config->base, spi_cfg->slave);
	Cy_SCB_SPI_Enable(config->base);

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	spi_context_cs_control(&data->ctx, true);

	spi_psoc6_transfer_next_packet(dev);

	if (asynchronous) {
		Cy_SCB_SetMasterInterruptMask(config->base,
					      CY_SCB_MASTER_INTR_SPI_DONE);
	} else {
		spi_psoc6_transceive_sync_loop(dev);
	}

	ret = spi_context_wait_for_completion(&data->ctx);

	Cy_SCB_SPI_Disable(config->base, NULL);

out:
	spi_context_release(&data->ctx, ret);

	return ret;
}

static int spi_psoc6_transceive_sync(const struct device *dev,
				     const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs)
{
	return spi_psoc6_transceive(dev, spi_cfg, tx_bufs,
				    rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_psoc6_transceive_async(const struct device *dev,
				      const struct spi_config *spi_cfg,
				      const struct spi_buf_set *tx_bufs,
				      const struct spi_buf_set *rx_bufs,
				      spi_callback_t cb,
				      void *userdata)
{
	return spi_psoc6_transceive(dev, spi_cfg, tx_bufs,
				    rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

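/* Release the bus lock; the SCB itself is already disabled at the end of
 * each transceive call.
 */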
static int spi_psoc6_release(const struct device *dev,
			     const struct spi_config *config)
{
	struct spi_psoc6_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

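/*
 * Driver init: apply the default pinctrl state, route an 8-bit peripheral
 * clock divider (divide-by-1) to the SCB, hook the interrupt when
 * CONFIG_SPI_ASYNC is enabled and configure any chip-select GPIOs.
 */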
static int spi_psoc6_init(const struct device *dev)
{
	int err;
	const struct spi_psoc6_config *config = dev->config;
	struct spi_psoc6_data *data = dev->data;

	/* Configure dt provided device signals when available */
	err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		return err;
	}

	Cy_SysClk_PeriphAssignDivider(config->periph_id,
				      CY_SYSCLK_DIV_8_BIT,
				      SPI_PSOC6_CLK_DIV_NUMBER);
	Cy_SysClk_PeriphSetDivider(CY_SYSCLK_DIV_8_BIT,
				   SPI_PSOC6_CLK_DIV_NUMBER, 0U);
	Cy_SysClk_PeriphEnableDivider(CY_SYSCLK_DIV_8_BIT,
				      SPI_PSOC6_CLK_DIV_NUMBER);

#ifdef CONFIG_SPI_ASYNC
	config->irq_config_func(dev);
#endif

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	return spi_psoc6_release(dev, NULL);
}

static const struct spi_driver_api spi_psoc6_driver_api = {
	.transceive = spi_psoc6_transceive_sync,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_psoc6_transceive_async,
#endif
	.release = spi_psoc6_release,
};

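/*
 * Per-instance glue: SPI_PSOC6_DEVICE_INIT() defines the pinctrl state,
 * config/data structures, device object and IRQ install hook for every
 * enabled "cypress,psoc6-spi" node.
 *
 * Illustrative application usage (sketch only; the "scb6" node label and
 * the transfer contents are assumptions, not part of this driver):
 *
 *	const struct device *spi = DEVICE_DT_GET(DT_NODELABEL(scb6));
 *	struct spi_config cfg = {
 *		.frequency = 1000000U,
 *		.operation = SPI_OP_MODE_MASTER | SPI_WORD_SET(8),
 *		.slave = 0,
 *	};
 *	uint8_t tx[2] = { 0x9f, 0x00 }, rx[2];
 *	const struct spi_buf txb = { .buf = tx, .len = sizeof(tx) };
 *	const struct spi_buf rxb = { .buf = rx, .len = sizeof(rx) };
 *	const struct spi_buf_set txs = { .buffers = &txb, .count = 1 };
 *	const struct spi_buf_set rxs = { .buffers = &rxb, .count = 1 };
 *
 *	spi_transceive(spi, &cfg, &txs, &rxs);
 */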
#define SPI_PSOC6_DEVICE_INIT(n)					\
	PINCTRL_DT_INST_DEFINE(n);					\
	static void spi_psoc6_spi##n##_irq_cfg(const struct device *port); \
	static const struct spi_psoc6_config spi_psoc6_config_##n = {	\
		.base = (CySCB_Type *)DT_INST_REG_ADDR(n),		\
		.periph_id = DT_INST_PROP(n, peripheral_id),		\
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),		\
		.irq_config_func = spi_psoc6_spi##n##_irq_cfg,		\
	};								\
	static struct spi_psoc6_data spi_psoc6_dev_data_##n = {		\
		SPI_CONTEXT_INIT_LOCK(spi_psoc6_dev_data_##n, ctx),	\
		SPI_CONTEXT_INIT_SYNC(spi_psoc6_dev_data_##n, ctx),	\
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)	\
	};								\
	DEVICE_DT_INST_DEFINE(n, &spi_psoc6_init, NULL,			\
			      &spi_psoc6_dev_data_##n,			\
			      &spi_psoc6_config_##n, POST_KERNEL,	\
			      CONFIG_SPI_INIT_PRIORITY,			\
			      &spi_psoc6_driver_api);			\
	static void spi_psoc6_spi##n##_irq_cfg(const struct device *port) \
	{								\
		CY_PSOC6_DT_INST_NVIC_INSTALL(n,			\
					      spi_psoc6_isr);		\
	};

DT_INST_FOREACH_STATUS_OKAY(SPI_PSOC6_DEVICE_INIT)