1 /*
2 * Copyright (c) 2024, Basalte bv
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT nxp_imx_ecspi
8
9 #include <zephyr/logging/log.h>
10 LOG_MODULE_REGISTER(spi_mcux_ecspi, CONFIG_SPI_LOG_LEVEL);
11
12 #include <zephyr/device.h>
13 #include <zephyr/drivers/clock_control.h>
14 #include <zephyr/drivers/pinctrl.h>
15 #include <zephyr/drivers/spi.h>
16 #include <zephyr/drivers/spi/rtio.h>
17 #include <fsl_ecspi.h>
18
19 #include "spi_context.h"
20
21 #define SPI_MCUX_ECSPI_MAX_BURST 4096
22
/* Per-instance constant configuration, populated from devicetree. */
struct spi_mcux_config {
	ECSPI_Type *base;			/* ECSPI peripheral register base */
	const struct pinctrl_dev_config *pincfg;
	const struct device *clock_dev;		/* clock controller feeding the ECSPI */
	clock_control_subsys_t clock_subsys;	/* subsystem used for rate lookup */
	void (*irq_config_func)(const struct device *dev); /* connects + enables the IRQ */
};
30
/* Per-instance mutable driver state. */
struct spi_mcux_data {
	ecspi_master_handle_t handle;	/* MCUX HAL non-blocking transfer handle */
	struct spi_context ctx;		/* generic SPI context (lock, buffers, CS) */

	uint16_t dfs;			/* bytes per data word: 1, 2 or 4 */
	uint16_t word_size;		/* configured word size in bits */

	/* Word-sized bounce buffers: each transfer moves exactly one word
	 * through these so user buffers may be unaligned (see UNALIGNED_GET/
	 * UNALIGNED_PUT in the transfer path).
	 */
	uint32_t rx_data;
	uint32_t tx_data;
};
41
/* Map a word size in bits (1..32) to the storage size of one word in bytes. */
static inline uint16_t bytes_per_word(uint16_t bits_per_word)
{
	if (bits_per_word > 16U) {
		return 4U;
	}

	return (bits_per_word > 8U) ? 2U : 1U;
}
53
/*
 * Start the next single-word hardware transfer, or complete the transaction.
 *
 * Called from transceive() to kick off a transaction and again from the
 * MCUX transfer-complete callback for every subsequent word.  When both
 * the TX and RX sides of the spi_context are exhausted, the chip select
 * is released and the context is completed with status 0.
 */
static void spi_mcux_transfer_next_packet(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	ECSPI_Type *base = config->base;
	struct spi_context *ctx = &data->ctx;
	ecspi_transfer_t transfer;
	status_t status;

	if ((ctx->tx_len == 0) && (ctx->rx_len == 0)) {
		/* nothing left to rx or tx, we're done! */
		spi_context_cs_control(&data->ctx, false);
		spi_context_complete(&data->ctx, dev, 0);
		return;
	}

	transfer.channel = ctx->config->slave;

	/* Receive into the word-sized bounce buffer; the completion callback
	 * copies it out to the (possibly unaligned) user RX buffer.
	 */
	if (spi_context_rx_buf_on(ctx)) {
		transfer.rxData = &data->rx_data;
	} else {
		transfer.rxData = NULL;
	}

	if (spi_context_tx_buf_on(ctx)) {
		/* dfs is always 1, 2 or 4 (see bytes_per_word()); copy one
		 * word from the user TX buffer, which may be unaligned.
		 */
		switch (data->dfs) {
		case 1U:
			data->tx_data = UNALIGNED_GET((uint8_t *)ctx->tx_buf);
			break;
		case 2U:
			data->tx_data = UNALIGNED_GET((uint16_t *)ctx->tx_buf);
			break;
		case 4U:
			data->tx_data = UNALIGNED_GET((uint32_t *)ctx->tx_buf);
			break;
		}

		transfer.txData = &data->tx_data;
	} else {
		transfer.txData = NULL;
	}

	/* Burst length is set in the configure step */
	transfer.dataSize = 1;

	status = ECSPI_MasterTransferNonBlocking(base, &data->handle, &transfer);
	if (status != kStatus_Success) {
		LOG_ERR("Transfer could not start");
		spi_context_cs_control(&data->ctx, false);
		spi_context_complete(&data->ctx, dev, -EIO);
	}
}
106
spi_mcux_isr(const struct device * dev)107 static void spi_mcux_isr(const struct device *dev)
108 {
109 const struct spi_mcux_config *config = dev->config;
110 struct spi_mcux_data *data = dev->data;
111 ECSPI_Type *base = config->base;
112
113 ECSPI_MasterTransferHandleIRQ(base, &data->handle);
114 }
115
/*
 * MCUX transfer-complete callback (runs in interrupt context).
 *
 * Copies the received word from the bounce buffer into the user RX
 * buffer, advances both spi_context buffers by one word, and chains the
 * next single-word transfer via spi_mcux_transfer_next_packet().
 *
 * NOTE(review): @p status is not checked here, so a HAL-reported error
 * would still advance the buffers and continue — confirm intentional.
 */
static void spi_mcux_master_transfer_callback(ECSPI_Type *base, ecspi_master_handle_t *handle,
					      status_t status, void *user_data)
{
	const struct device *dev = (const struct device *)user_data;
	struct spi_mcux_data *data = dev->data;

	if (spi_context_rx_buf_on(&data->ctx)) {
		/* Store one word to the possibly-unaligned user RX buffer */
		switch (data->dfs) {
		case 1:
			UNALIGNED_PUT(data->rx_data, (uint8_t *)data->ctx.rx_buf);
			break;
		case 2:
			UNALIGNED_PUT(data->rx_data, (uint16_t *)data->ctx.rx_buf);
			break;
		case 4:
			UNALIGNED_PUT(data->rx_data, (uint32_t *)data->ctx.rx_buf);
			break;
		}
	}

	/* Advance both sides by one word of data->dfs bytes */
	spi_context_update_tx(&data->ctx, data->dfs, 1);
	spi_context_update_rx(&data->ctx, data->dfs, 1);

	spi_mcux_transfer_next_packet(dev);
}
141
spi_mcux_configure(const struct device * dev,const struct spi_config * spi_cfg)142 static int spi_mcux_configure(const struct device *dev,
143 const struct spi_config *spi_cfg)
144 {
145 const struct spi_mcux_config *config = dev->config;
146 struct spi_mcux_data *data = dev->data;
147 ECSPI_Type *base = config->base;
148 ecspi_master_config_t master_config;
149 uint32_t clock_freq;
150 uint16_t word_size;
151
152 if (spi_context_configured(&data->ctx, spi_cfg)) {
153 /* This configuration is already in use */
154 return 0;
155 }
156
157 if (spi_cfg->operation & SPI_HALF_DUPLEX) {
158 LOG_ERR("Half-duplex not supported");
159 return -ENOTSUP;
160 }
161
162 if (spi_cfg->operation & SPI_TRANSFER_LSB) {
163 LOG_ERR("HW byte re-ordering not supported");
164 return -ENOTSUP;
165 }
166
167 if (!spi_cs_is_gpio(spi_cfg) && spi_cfg->slave > kECSPI_Channel3) {
168 LOG_ERR("Slave %d is greater than %d", spi_cfg->slave, kECSPI_Channel3);
169 return -EINVAL;
170 }
171
172 if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq)) {
173 LOG_ERR("Failed to get clock rate");
174 return -EINVAL;
175 }
176
177 word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
178 if (0 == word_size || word_size > 32) {
179 LOG_ERR("Invalid word size (0 < %d <= 32)", word_size);
180 return -EINVAL;
181 }
182
183 ECSPI_MasterGetDefaultConfig(&master_config);
184
185 master_config.channel =
186 spi_cs_is_gpio(spi_cfg) ? kECSPI_Channel0 : (ecspi_channel_source_t)spi_cfg->slave;
187 master_config.channelConfig.polarity =
188 (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL)
189 ? kECSPI_PolarityActiveLow
190 : kECSPI_PolarityActiveHigh;
191 master_config.channelConfig.phase =
192 (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA)
193 ? kECSPI_ClockPhaseSecondEdge
194 : kECSPI_ClockPhaseFirstEdge;
195 master_config.baudRate_Bps = spi_cfg->frequency;
196 master_config.burstLength = word_size;
197
198 master_config.enableLoopback = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_LOOP);
199
200 if (!spi_cs_is_gpio(spi_cfg)) {
201 uint32_t clock_cycles =
202 DIV_ROUND_UP(spi_cfg->cs.delay * USEC_PER_SEC, spi_cfg->frequency);
203
204 if (clock_cycles > 63U) {
205 LOG_ERR("CS delay is greater than 63 clock cycles (%u)", clock_cycles);
206 return -EINVAL;
207 }
208 master_config.chipSelectDelay = (uint8_t)clock_cycles;
209 }
210
211 ECSPI_MasterInit(base, &master_config, clock_freq);
212 ECSPI_MasterTransferCreateHandle(base, &data->handle,
213 spi_mcux_master_transfer_callback,
214 (void *)dev);
215
216 data->word_size = word_size;
217 data->dfs = bytes_per_word(word_size);
218 data->ctx.config = spi_cfg;
219
220 return 0;
221 }
222
/*
 * Common transceive path for the blocking and asynchronous entry points.
 *
 * Locks the context, applies the configuration, asserts chip select,
 * starts the word-by-word transfer chain and waits for (or, when
 * asynchronous, arms) completion before releasing the context.
 */
static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	struct spi_mcux_data *data = dev->data;
	int ret;

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);

	ret = spi_mcux_configure(dev, spi_cfg);
	if (ret == 0) {
		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, data->dfs);
		spi_context_cs_control(&data->ctx, true);

		spi_mcux_transfer_next_packet(dev);
		ret = spi_context_wait_for_completion(&data->ctx);
	}

	spi_context_release(&data->ctx, ret);

	return ret;
}
252
/* spi_driver_api::transceive — blocking entry point (no callback). */
static int spi_mcux_transceive(const struct device *dev,
			       const struct spi_config *spi_cfg,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}
260
261 #ifdef CONFIG_SPI_ASYNC
/* spi_driver_api::transceive_async — non-blocking entry point; @p cb is
 * invoked with @p userdata when the transaction completes.
 */
static int spi_mcux_transceive_async(const struct device *dev,
				     const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
271 #endif /* CONFIG_SPI_ASYNC */
272
spi_mcux_release(const struct device * dev,const struct spi_config * spi_cfg)273 static int spi_mcux_release(const struct device *dev, const struct spi_config *spi_cfg)
274 {
275 struct spi_mcux_data *data = dev->data;
276
277 ARG_UNUSED(spi_cfg);
278
279 spi_context_unlock_unconditionally(&data->ctx);
280
281 return 0;
282 }
283
spi_mcux_init(const struct device * dev)284 static int spi_mcux_init(const struct device *dev)
285 {
286 int ret;
287 const struct spi_mcux_config *config = dev->config;
288 struct spi_mcux_data *data = dev->data;
289
290 config->irq_config_func(dev);
291
292 ret = spi_context_cs_configure_all(&data->ctx);
293 if (ret < 0) {
294 return ret;
295 }
296
297 ret = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
298 if (ret < 0) {
299 return ret;
300 }
301
302 spi_context_unlock_unconditionally(&data->ctx);
303
304 return 0;
305 }
306
/* SPI driver API vtable for this driver. */
static DEVICE_API(spi, spi_mcux_driver_api) = {
	.transceive = spi_mcux_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_mcux_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	/* No native RTIO support; use the generic fallback submit helper */
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = spi_mcux_release,
};
317
/*
 * Per-instance boilerplate for each enabled nxp,imx-ecspi devicetree
 * node: pinctrl state, constant config, mutable data (with context
 * lock/sync/CS initializers), device definition and IRQ wiring.  The
 * IRQ config function is defined after the device so it can reference
 * DEVICE_DT_INST_GET(n).
 */
#define SPI_MCUX_ECSPI_INIT(n)						\
	PINCTRL_DT_INST_DEFINE(n);					\
	static void spi_mcux_config_func_##n(const struct device *dev);	\
									\
	static const struct spi_mcux_config spi_mcux_config_##n = {	\
		.base = (ECSPI_Type *) DT_INST_REG_ADDR(n),		\
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),		\
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),	\
		.clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name), \
		.irq_config_func = spi_mcux_config_func_##n,		\
	};								\
									\
	static struct spi_mcux_data spi_mcux_data_##n = {		\
		SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##n, ctx),		\
		SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##n, ctx),		\
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)	\
	};								\
									\
	SPI_DEVICE_DT_INST_DEFINE(n, spi_mcux_init, NULL,		\
				  &spi_mcux_data_##n, &spi_mcux_config_##n, \
				  POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \
				  &spi_mcux_driver_api);		\
									\
	static void spi_mcux_config_func_##n(const struct device *dev)	\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority),	\
			    spi_mcux_isr, DEVICE_DT_INST_GET(n), 0);	\
									\
		irq_enable(DT_INST_IRQN(n));				\
	}

/* Instantiate the driver for every enabled devicetree node */
DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_ECSPI_INIT)
350