1 /*
2 * Copyright (c) 2018, NXP
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT nxp_imx_lpspi
8
9 #include <errno.h>
10 #include <drivers/spi.h>
11 #include <drivers/clock_control.h>
12 #include <fsl_lpspi.h>
13
14 #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
15 #include <logging/log.h>
16 LOG_MODULE_REGISTER(spi_mcux_lpspi);
17
18 #include "spi_context.h"
19
20 #define CHIP_SELECT_COUNT 4
21 #define MAX_DATA_WIDTH 4096
22
/* Per-instance constant configuration, populated from devicetree. */
struct spi_mcux_config {
	LPSPI_Type *base;			/* LPSPI peripheral register base */
	const struct device *clock_dev;		/* clock controller feeding this LPSPI */
	clock_control_subsys_t clock_subsys;	/* subsystem ID for clock rate queries */
	void (*irq_config_func)(const struct device *dev); /* connects/enables the IRQ */
	uint32_t pcs_sck_delay;		/* PCS-assert to first SCK edge delay, ns */
	uint32_t sck_pcs_delay;		/* last SCK edge to PCS-deassert delay, ns */
	uint32_t transfer_delay;	/* delay between transfers, ns */
};
32
/* Per-instance mutable driver state. */
struct spi_mcux_data {
	const struct device *dev;	/* back-pointer for use from the SDK callback */
	lpspi_master_handle_t handle;	/* MCUX SDK non-blocking transfer handle */
	struct spi_context ctx;		/* generic SPI context (lock, buffers, CS) */
	size_t transfer_len;		/* byte count of the in-flight packet */
};
39
spi_mcux_transfer_next_packet(const struct device * dev)40 static void spi_mcux_transfer_next_packet(const struct device *dev)
41 {
42 const struct spi_mcux_config *config = dev->config;
43 struct spi_mcux_data *data = dev->data;
44 LPSPI_Type *base = config->base;
45 struct spi_context *ctx = &data->ctx;
46 lpspi_transfer_t transfer;
47 status_t status;
48
49 if ((ctx->tx_len == 0) && (ctx->rx_len == 0)) {
50 /* nothing left to rx or tx, we're done! */
51 spi_context_cs_control(&data->ctx, false);
52 spi_context_complete(&data->ctx, 0);
53 return;
54 }
55
56 transfer.configFlags = kLPSPI_MasterPcsContinuous |
57 (ctx->config->slave << LPSPI_MASTER_PCS_SHIFT);
58
59 if (ctx->tx_len == 0) {
60 /* rx only, nothing to tx */
61 transfer.txData = NULL;
62 transfer.rxData = ctx->rx_buf;
63 transfer.dataSize = ctx->rx_len;
64 } else if (ctx->rx_len == 0) {
65 /* tx only, nothing to rx */
66 transfer.txData = (uint8_t *) ctx->tx_buf;
67 transfer.rxData = NULL;
68 transfer.dataSize = ctx->tx_len;
69 } else if (ctx->tx_len == ctx->rx_len) {
70 /* rx and tx are the same length */
71 transfer.txData = (uint8_t *) ctx->tx_buf;
72 transfer.rxData = ctx->rx_buf;
73 transfer.dataSize = ctx->tx_len;
74 } else if (ctx->tx_len > ctx->rx_len) {
75 /* Break up the tx into multiple transfers so we don't have to
76 * rx into a longer intermediate buffer. Leave chip select
77 * active between transfers.
78 */
79 transfer.txData = (uint8_t *) ctx->tx_buf;
80 transfer.rxData = ctx->rx_buf;
81 transfer.dataSize = ctx->rx_len;
82 transfer.configFlags |= kLPSPI_MasterPcsContinuous;
83 } else {
84 /* Break up the rx into multiple transfers so we don't have to
85 * tx from a longer intermediate buffer. Leave chip select
86 * active between transfers.
87 */
88 transfer.txData = (uint8_t *) ctx->tx_buf;
89 transfer.rxData = ctx->rx_buf;
90 transfer.dataSize = ctx->tx_len;
91 transfer.configFlags |= kLPSPI_MasterPcsContinuous;
92 }
93
94 if (!(ctx->tx_count <= 1 && ctx->rx_count <= 1)) {
95 transfer.configFlags |= kLPSPI_MasterPcsContinuous;
96 }
97
98 data->transfer_len = transfer.dataSize;
99
100 status = LPSPI_MasterTransferNonBlocking(base, &data->handle,
101 &transfer);
102 if (status != kStatus_Success) {
103 LOG_ERR("Transfer could not start");
104 }
105 }
106
spi_mcux_isr(const struct device * dev)107 static void spi_mcux_isr(const struct device *dev)
108 {
109 const struct spi_mcux_config *config = dev->config;
110 struct spi_mcux_data *data = dev->data;
111 LPSPI_Type *base = config->base;
112
113 LPSPI_MasterTransferHandleIRQ(base, &data->handle);
114 }
115
/* MCUX SDK completion callback: one hardware transfer chunk has finished.
 * Advance both buffer sets by the chunk length and kick off the next chunk
 * (or complete the operation if nothing remains).
 */
static void spi_mcux_master_transfer_callback(LPSPI_Type *base,
		lpspi_master_handle_t *handle, status_t status, void *userData)
{
	struct spi_mcux_data *data = userData;
	size_t done = data->transfer_len;

	spi_context_update_tx(&data->ctx, 1, done);
	spi_context_update_rx(&data->ctx, 1, done);

	spi_mcux_transfer_next_packet(data->dev);
}
126
spi_mcux_configure(const struct device * dev,const struct spi_config * spi_cfg)127 static int spi_mcux_configure(const struct device *dev,
128 const struct spi_config *spi_cfg)
129 {
130 const struct spi_mcux_config *config = dev->config;
131 struct spi_mcux_data *data = dev->data;
132 LPSPI_Type *base = config->base;
133 lpspi_master_config_t master_config;
134 uint32_t clock_freq;
135 uint32_t word_size;
136
137 if (spi_context_configured(&data->ctx, spi_cfg)) {
138 /* This configuration is already in use */
139 return 0;
140 }
141
142 LPSPI_MasterGetDefaultConfig(&master_config);
143
144 if (spi_cfg->slave > CHIP_SELECT_COUNT) {
145 LOG_ERR("Slave %d is greater than %d",
146 spi_cfg->slave,
147 CHIP_SELECT_COUNT);
148 return -EINVAL;
149 }
150
151 word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
152 if (word_size > MAX_DATA_WIDTH) {
153 LOG_ERR("Word size %d is greater than %d",
154 word_size, MAX_DATA_WIDTH);
155 return -EINVAL;
156 }
157
158 master_config.bitsPerFrame = word_size;
159
160 master_config.cpol =
161 (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL)
162 ? kLPSPI_ClockPolarityActiveLow
163 : kLPSPI_ClockPolarityActiveHigh;
164
165 master_config.cpha =
166 (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA)
167 ? kLPSPI_ClockPhaseSecondEdge
168 : kLPSPI_ClockPhaseFirstEdge;
169
170 master_config.direction =
171 (spi_cfg->operation & SPI_TRANSFER_LSB)
172 ? kLPSPI_LsbFirst
173 : kLPSPI_MsbFirst;
174
175 master_config.baudRate = spi_cfg->frequency;
176
177 master_config.pcsToSckDelayInNanoSec = config->pcs_sck_delay;
178 master_config.lastSckToPcsDelayInNanoSec = config->sck_pcs_delay;
179 master_config.betweenTransferDelayInNanoSec = config->transfer_delay;
180
181 if (clock_control_get_rate(config->clock_dev, config->clock_subsys,
182 &clock_freq)) {
183 return -EINVAL;
184 }
185
186 LPSPI_MasterInit(base, &master_config, clock_freq);
187
188 LPSPI_MasterTransferCreateHandle(base, &data->handle,
189 spi_mcux_master_transfer_callback,
190 data);
191
192 LPSPI_SetDummyData(base, 0);
193
194 data->ctx.config = spi_cfg;
195 spi_context_cs_configure(&data->ctx);
196
197 return 0;
198 }
199
/*
 * Common transceive path for both the sync and async entry points.
 * Takes the context lock, (re)configures the peripheral if needed, starts
 * the first packet, then waits for (or, when asynchronous, arms) completion.
 */
static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      struct k_poll_signal *signal)
{
	struct spi_mcux_data *data = dev->data;
	int ret;

	spi_context_lock(&data->ctx, asynchronous, signal, spi_cfg);

	ret = spi_mcux_configure(dev, spi_cfg);
	if (ret == 0) {
		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
		spi_context_cs_control(&data->ctx, true);

		spi_mcux_transfer_next_packet(dev);
		ret = spi_context_wait_for_completion(&data->ctx);
	}

	spi_context_release(&data->ctx, ret);

	return ret;
}
229
spi_mcux_transceive(const struct device * dev,const struct spi_config * spi_cfg,const struct spi_buf_set * tx_bufs,const struct spi_buf_set * rx_bufs)230 static int spi_mcux_transceive(const struct device *dev,
231 const struct spi_config *spi_cfg,
232 const struct spi_buf_set *tx_bufs,
233 const struct spi_buf_set *rx_bufs)
234 {
235 return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL);
236 }
237
238 #ifdef CONFIG_SPI_ASYNC
spi_mcux_transceive_async(const struct device * dev,const struct spi_config * spi_cfg,const struct spi_buf_set * tx_bufs,const struct spi_buf_set * rx_bufs,struct k_poll_signal * async)239 static int spi_mcux_transceive_async(const struct device *dev,
240 const struct spi_config *spi_cfg,
241 const struct spi_buf_set *tx_bufs,
242 const struct spi_buf_set *rx_bufs,
243 struct k_poll_signal *async)
244 {
245 return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, async);
246 }
247 #endif /* CONFIG_SPI_ASYNC */
248
spi_mcux_release(const struct device * dev,const struct spi_config * spi_cfg)249 static int spi_mcux_release(const struct device *dev,
250 const struct spi_config *spi_cfg)
251 {
252 struct spi_mcux_data *data = dev->data;
253
254 spi_context_unlock_unconditionally(&data->ctx);
255
256 return 0;
257 }
258
spi_mcux_init(const struct device * dev)259 static int spi_mcux_init(const struct device *dev)
260 {
261 const struct spi_mcux_config *config = dev->config;
262 struct spi_mcux_data *data = dev->data;
263
264 config->irq_config_func(dev);
265
266 spi_context_unlock_unconditionally(&data->ctx);
267
268 data->dev = dev;
269
270 return 0;
271 }
272
/* Zephyr SPI driver API vtable for this driver. */
static const struct spi_driver_api spi_mcux_driver_api = {
	.transceive = spi_mcux_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_mcux_transceive_async,
#endif
	.release = spi_mcux_release,
};
280
/* Per-devicetree-instance boilerplate: config struct (register base, clock
 * handle, IRQ setup hook, optional delay properties defaulting to 0 via
 * UTIL_AND), data struct with initialized context lock/sync, device
 * definition, and the IRQ connect/enable function referenced by the config.
 */
#define SPI_MCUX_LPSPI_INIT(n) \
	static void spi_mcux_config_func_##n(const struct device *dev); \
	\
	static const struct spi_mcux_config spi_mcux_config_##n = { \
	.base = (LPSPI_Type *) DT_INST_REG_ADDR(n), \
	.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \
	.clock_subsys = \
	(clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name), \
	.irq_config_func = spi_mcux_config_func_##n, \
	.pcs_sck_delay = UTIL_AND( \
	DT_INST_NODE_HAS_PROP(n, pcs_sck_delay), \
	DT_INST_PROP(n, pcs_sck_delay)), \
	.sck_pcs_delay = UTIL_AND( \
	DT_INST_NODE_HAS_PROP(n, sck_pcs_delay), \
	DT_INST_PROP(n, sck_pcs_delay)), \
	.transfer_delay = UTIL_AND( \
	DT_INST_NODE_HAS_PROP(n, transfer_delay), \
	DT_INST_PROP(n, transfer_delay)), \
	}; \
	\
	static struct spi_mcux_data spi_mcux_data_##n = { \
	SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##n, ctx), \
	SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##n, ctx), \
	}; \
	\
	DEVICE_DT_INST_DEFINE(n, &spi_mcux_init, NULL, \
	&spi_mcux_data_##n, \
	&spi_mcux_config_##n, POST_KERNEL, \
	CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \
	&spi_mcux_driver_api); \
	\
	static void spi_mcux_config_func_##n(const struct device *dev) \
	{ \
	IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \
	spi_mcux_isr, DEVICE_DT_INST_GET(n), 0); \
	\
	irq_enable(DT_INST_IRQN(n)); \
	}

/* Instantiate the driver for every enabled nxp,imx-lpspi devicetree node. */
DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_LPSPI_INIT)
321