1 /*
2 * Copyright (c) 2018, NXP
3 *
4 * Forked off the spi_mcux_lpi2c driver.
5 *
6 * SPDX-License-Identifier: Apache-2.0
7 */
8
9 #define DT_DRV_COMPAT openisa_rv32m1_lpspi
10
11 #include <errno.h>
12 #include <drivers/spi.h>
13 #include <drivers/clock_control.h>
14 #include <fsl_lpspi.h>
15
16 #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
17 #include <logging/log.h>
18 LOG_MODULE_REGISTER(spi_rv32m1_lpspi);
19
20 #include "spi_context.h"
21
22 #define CHIP_SELECT_COUNT 4
23 #define MAX_DATA_WIDTH 4096
24
/* Per-instance constant configuration, built at compile time from devicetree. */
struct spi_mcux_config {
	LPSPI_Type *base;                /* LPSPI peripheral register base */
	const struct device *clock_dev;  /* clock controller feeding this LPSPI */
	clock_control_subsys_t clock_subsys; /* subsystem handle for rate queries */
	clock_ip_name_t clock_ip_name;   /* IP clock gate, used with CLOCK_SetIpSrc */
	uint32_t clock_ip_src;           /* functional clock source selection */
	void (*irq_config_func)(const struct device *dev); /* connects+enables IRQ */
};
33
/* Per-instance mutable driver state. */
struct spi_mcux_data {
	const struct device *dev;      /* back-pointer for the HAL callback */
	lpspi_master_handle_t handle;  /* MCUX HAL non-blocking transfer handle */
	struct spi_context ctx;        /* generic Zephyr SPI transfer context */
	size_t transfer_len;           /* byte count of the in-flight chunk */
};
40
spi_mcux_transfer_next_packet(const struct device * dev)41 static void spi_mcux_transfer_next_packet(const struct device *dev)
42 {
43 const struct spi_mcux_config *config = dev->config;
44 struct spi_mcux_data *data = dev->data;
45 LPSPI_Type *base = config->base;
46 struct spi_context *ctx = &data->ctx;
47 lpspi_transfer_t transfer;
48 status_t status;
49
50 if ((ctx->tx_len == 0) && (ctx->rx_len == 0)) {
51 /* nothing left to rx or tx, we're done! */
52 spi_context_cs_control(&data->ctx, false);
53 spi_context_complete(&data->ctx, 0);
54 return;
55 }
56
57 transfer.configFlags = kLPSPI_MasterPcsContinuous |
58 (ctx->config->slave << LPSPI_MASTER_PCS_SHIFT);
59
60 if (ctx->tx_len == 0) {
61 /* rx only, nothing to tx */
62 transfer.txData = NULL;
63 transfer.rxData = ctx->rx_buf;
64 transfer.dataSize = ctx->rx_len;
65 } else if (ctx->rx_len == 0) {
66 /* tx only, nothing to rx */
67 transfer.txData = (uint8_t *) ctx->tx_buf;
68 transfer.rxData = NULL;
69 transfer.dataSize = ctx->tx_len;
70 } else if (ctx->tx_len == ctx->rx_len) {
71 /* rx and tx are the same length */
72 transfer.txData = (uint8_t *) ctx->tx_buf;
73 transfer.rxData = ctx->rx_buf;
74 transfer.dataSize = ctx->tx_len;
75 } else if (ctx->tx_len > ctx->rx_len) {
76 /* Break up the tx into multiple transfers so we don't have to
77 * rx into a longer intermediate buffer. Leave chip select
78 * active between transfers.
79 */
80 transfer.txData = (uint8_t *) ctx->tx_buf;
81 transfer.rxData = ctx->rx_buf;
82 transfer.dataSize = ctx->rx_len;
83 transfer.configFlags |= kLPSPI_MasterPcsContinuous;
84 } else {
85 /* Break up the rx into multiple transfers so we don't have to
86 * tx from a longer intermediate buffer. Leave chip select
87 * active between transfers.
88 */
89 transfer.txData = (uint8_t *) ctx->tx_buf;
90 transfer.rxData = ctx->rx_buf;
91 transfer.dataSize = ctx->tx_len;
92 transfer.configFlags |= kLPSPI_MasterPcsContinuous;
93 }
94
95 if (!(ctx->tx_count <= 1 && ctx->rx_count <= 1)) {
96 transfer.configFlags |= kLPSPI_MasterPcsContinuous;
97 }
98
99 data->transfer_len = transfer.dataSize;
100 status = LPSPI_MasterTransferNonBlocking(base, &data->handle,
101 &transfer);
102 if (status != kStatus_Success) {
103 LOG_ERR("Transfer could not start");
104 }
105 }
106
spi_mcux_isr(const struct device * dev)107 static void spi_mcux_isr(const struct device *dev)
108 {
109 const struct spi_mcux_config *config = dev->config;
110 struct spi_mcux_data *data = dev->data;
111 LPSPI_Type *base = config->base;
112
113 LPSPI_MasterTransferHandleIRQ(base, &data->handle);
114 }
115
/*
 * HAL completion callback for one chunk: advance both buffer chains by
 * the number of bytes just transferred, then start the next chunk (or
 * complete the whole transaction).
 */
static void spi_mcux_master_transfer_callback(LPSPI_Type *base,
					      lpspi_master_handle_t *handle,
					      status_t status, void *userData)
{
	struct spi_mcux_data *data = userData;
	size_t done = data->transfer_len;

	spi_context_update_tx(&data->ctx, 1, done);
	spi_context_update_rx(&data->ctx, 1, done);

	spi_mcux_transfer_next_packet(data->dev);
}
127
spi_mcux_configure(const struct device * dev,const struct spi_config * spi_cfg)128 static int spi_mcux_configure(const struct device *dev,
129 const struct spi_config *spi_cfg)
130 {
131 const struct spi_mcux_config *config = dev->config;
132 struct spi_mcux_data *data = dev->data;
133 LPSPI_Type *base = config->base;
134 lpspi_master_config_t master_config;
135 uint32_t clock_freq;
136 uint32_t word_size;
137
138 if (spi_context_configured(&data->ctx, spi_cfg)) {
139 /* This configuration is already in use */
140 return 0;
141 }
142
143 LPSPI_MasterGetDefaultConfig(&master_config);
144
145 if (spi_cfg->slave > CHIP_SELECT_COUNT) {
146 LOG_ERR("Slave %d is greater than %d",
147 spi_cfg->slave,
148 CHIP_SELECT_COUNT);
149 return -EINVAL;
150 }
151
152 word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
153 if (word_size > MAX_DATA_WIDTH) {
154 LOG_ERR("Word size %d is greater than %d",
155 word_size, MAX_DATA_WIDTH);
156 return -EINVAL;
157 }
158
159 master_config.bitsPerFrame = word_size;
160
161 master_config.cpol =
162 (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL)
163 ? kLPSPI_ClockPolarityActiveLow
164 : kLPSPI_ClockPolarityActiveHigh;
165
166 master_config.cpha =
167 (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA)
168 ? kLPSPI_ClockPhaseSecondEdge
169 : kLPSPI_ClockPhaseFirstEdge;
170
171 master_config.direction =
172 (spi_cfg->operation & SPI_TRANSFER_LSB)
173 ? kLPSPI_LsbFirst
174 : kLPSPI_MsbFirst;
175
176 master_config.baudRate = spi_cfg->frequency;
177
178 if (clock_control_get_rate(config->clock_dev, config->clock_subsys,
179 &clock_freq)) {
180 return -EINVAL;
181 }
182
183 LPSPI_MasterInit(base, &master_config, clock_freq);
184
185 LPSPI_MasterTransferCreateHandle(base, &data->handle,
186 spi_mcux_master_transfer_callback,
187 data);
188
189 LPSPI_SetDummyData(base, 0);
190
191 data->ctx.config = spi_cfg;
192 spi_context_cs_configure(&data->ctx);
193
194 return 0;
195 }
196
/*
 * Common transfer path for both the sync and async entry points.
 *
 * Locks the context (blocking or signal-based per @asynchronous),
 * configures the controller, asserts chip select, starts the first
 * chunk, and waits for (or arms) completion. Always releases the
 * context lock before returning.
 */
static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      struct k_poll_signal *signal)
{
	struct spi_mcux_data *data = dev->data;
	int ret;

	spi_context_lock(&data->ctx, asynchronous, signal, spi_cfg);

	ret = spi_mcux_configure(dev, spi_cfg);
	if (ret == 0) {
		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
		spi_context_cs_control(&data->ctx, true);

		spi_mcux_transfer_next_packet(dev);

		ret = spi_context_wait_for_completion(&data->ctx);
	}

	spi_context_release(&data->ctx, ret);

	return ret;
}
226
spi_mcux_transceive(const struct device * dev,const struct spi_config * spi_cfg,const struct spi_buf_set * tx_bufs,const struct spi_buf_set * rx_bufs)227 static int spi_mcux_transceive(const struct device *dev,
228 const struct spi_config *spi_cfg,
229 const struct spi_buf_set *tx_bufs,
230 const struct spi_buf_set *rx_bufs)
231 {
232 return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL);
233 }
234
#ifdef CONFIG_SPI_ASYNC
/* Asynchronous spi_driver_api.transceive_async: @async is raised on completion. */
static int spi_mcux_transceive_async(const struct device *dev,
				     const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     struct k_poll_signal *async)
{
	/* asynchronous = true; completion delivered through @async */
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, async);
}
#endif /* CONFIG_SPI_ASYNC */
245
spi_mcux_release(const struct device * dev,const struct spi_config * spi_cfg)246 static int spi_mcux_release(const struct device *dev,
247 const struct spi_config *spi_cfg)
248 {
249 struct spi_mcux_data *data = dev->data;
250
251 spi_context_unlock_unconditionally(&data->ctx);
252
253 return 0;
254 }
255
spi_mcux_init(const struct device * dev)256 static int spi_mcux_init(const struct device *dev)
257 {
258 const struct spi_mcux_config *config = dev->config;
259 struct spi_mcux_data *data = dev->data;
260
261 CLOCK_SetIpSrc(config->clock_ip_name, config->clock_ip_src);
262
263 config->irq_config_func(dev);
264
265 data->dev = dev;
266
267 spi_context_unlock_unconditionally(&data->ctx);
268
269 return 0;
270 }
271
/* SPI driver API vtable shared by all instances. */
static const struct spi_driver_api spi_mcux_driver_api = {
	.transceive = spi_mcux_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_mcux_transceive_async,
#endif
	.release = spi_mcux_release,
};
279
/*
 * Per-instance boilerplate, expanded once for every enabled
 * "openisa,rv32m1-lpspi" devicetree node:
 *  - const config (register base, clock handles, IRQ hookup function);
 *    clock_ip_src is hard-wired to kCLOCK_IpSrcFircAsync,
 *  - mutable data with the spi_context lock/sync primitives initialized,
 *  - the device definition at POST_KERNEL / device init priority,
 *  - the IRQ config function (defined last because it uses
 *    DEVICE_DT_INST_GET, which needs the device defined first).
 */
#define SPI_RV32M1_INIT(n)						\
	static void spi_mcux_config_func_##n(const struct device *dev);	\
									\
	static const struct spi_mcux_config spi_mcux_config_##n = {	\
		.base = (LPSPI_Type *) DT_INST_REG_ADDR(n),		\
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),	\
		.clock_subsys = (clock_control_subsys_t)		\
			DT_INST_CLOCKS_CELL(n, name),			\
		.irq_config_func = spi_mcux_config_func_##n,		\
		.clock_ip_name = INST_DT_CLOCK_IP_NAME(n),		\
		.clock_ip_src = kCLOCK_IpSrcFircAsync,			\
	};								\
									\
	static struct spi_mcux_data spi_mcux_data_##n = {		\
		SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##n, ctx),		\
		SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##n, ctx),		\
	};								\
									\
	DEVICE_DT_INST_DEFINE(n, &spi_mcux_init, NULL,			\
			      &spi_mcux_data_##n,			\
			      &spi_mcux_config_##n,			\
			      POST_KERNEL,				\
			      CONFIG_KERNEL_INIT_PRIORITY_DEVICE,	\
			      &spi_mcux_driver_api);			\
									\
	static void spi_mcux_config_func_##n(const struct device *dev)	\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(n),				\
			    0,						\
			    spi_mcux_isr, DEVICE_DT_INST_GET(n), 0);	\
		irq_enable(DT_INST_IRQN(n));				\
	}

/* Instantiate the driver for every enabled devicetree node. */
DT_INST_FOREACH_STATUS_OKAY(SPI_RV32M1_INIT)
314