/*
 * Copyright (c) 2017 - 2018, Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/pm/device.h>
#include <zephyr/drivers/pinctrl.h>
#include <soc.h>
#include <nrfx_spi.h>

#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(spi_nrfx_spi, CONFIG_SPI_LOG_LEVEL);

#include "spi_context.h"
#include "spi_nrfx_common.h"

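/* Per-instance driver data: the SPI context used by the generic SPI
 * framework, a back-pointer to the device, the length of the chunk
 * currently being transferred, and flags tracking whether a transfer
 * is in progress and whether the nrfx driver is initialized.
 */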
struct spi_nrfx_data {
	struct spi_context ctx;
	const struct device *dev;
	size_t chunk_len;
	bool busy;
	bool initialized;
};

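/* Per-instance constant configuration: the nrfx driver instance, the
 * default nrfx configuration (refined in configure()), the IRQ connect
 * hook, the pinctrl configuration, and the optional WAKE line (pin and
 * GPIOTE instance) used to wake the peripheral device before a transfer.
 */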
struct spi_nrfx_config {
	nrfx_spi_t spi;
	nrfx_spi_config_t def_config;
	void (*irq_connect)(void);
	const struct pinctrl_dev_config *pcfg;
	uint32_t wake_pin;
	nrfx_gpiote_t wake_gpiote;
};

static void event_handler(const nrfx_spi_evt_t *p_event, void *p_context);

static inline nrf_spi_frequency_t get_nrf_spi_frequency(uint32_t frequency)
{
	/* Get the highest supported frequency not exceeding the requested one.
	 */
	if (frequency < 250000) {
		return NRF_SPI_FREQ_125K;
	} else if (frequency < 500000) {
		return NRF_SPI_FREQ_250K;
	} else if (frequency < 1000000) {
		return NRF_SPI_FREQ_500K;
	} else if (frequency < 2000000) {
		return NRF_SPI_FREQ_1M;
	} else if (frequency < 4000000) {
		return NRF_SPI_FREQ_2M;
	} else if (frequency < 8000000) {
		return NRF_SPI_FREQ_4M;
	} else {
		return NRF_SPI_FREQ_8M;
	}
}
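
/* For example, a requested frequency of 3 MHz falls into the 2-4 MHz
 * bucket above and is served with NRF_SPI_FREQ_2M; NRF_SPI_FREQ_8M is
 * used only for requests of 8 MHz and above.
 */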

static inline nrf_spi_mode_t get_nrf_spi_mode(uint16_t operation)
{
	if (SPI_MODE_GET(operation) & SPI_MODE_CPOL) {
		if (SPI_MODE_GET(operation) & SPI_MODE_CPHA) {
			return NRF_SPI_MODE_3;
		} else {
			return NRF_SPI_MODE_2;
		}
	} else {
		if (SPI_MODE_GET(operation) & SPI_MODE_CPHA) {
			return NRF_SPI_MODE_1;
		} else {
			return NRF_SPI_MODE_0;
		}
	}
}
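
/* The mapping above follows the conventional SPI mode numbering:
 *   CPOL=0, CPHA=0 -> NRF_SPI_MODE_0
 *   CPOL=0, CPHA=1 -> NRF_SPI_MODE_1
 *   CPOL=1, CPHA=0 -> NRF_SPI_MODE_2
 *   CPOL=1, CPHA=1 -> NRF_SPI_MODE_3
 */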

static inline nrf_spi_bit_order_t get_nrf_spi_bit_order(uint16_t operation)
{
	if (operation & SPI_TRANSFER_LSB) {
		return NRF_SPI_BIT_ORDER_LSB_FIRST;
	} else {
		return NRF_SPI_BIT_ORDER_MSB_FIRST;
	}
}

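/* Apply spi_cfg to the nrfx driver instance, reinitializing it if needed.
 * Returns 0 on success, or a negative errno value when the requested
 * settings are not supported by this peripheral.
 */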
static int configure(const struct device *dev,
		     const struct spi_config *spi_cfg)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	struct spi_context *ctx = &dev_data->ctx;
	nrfx_spi_config_t config;
	nrfx_err_t result;
	uint32_t sck_pin;

	if (dev_data->initialized && spi_context_configured(ctx, spi_cfg)) {
		/* Already configured. No need to do it again. */
		return 0;
	}

	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	if (SPI_OP_MODE_GET(spi_cfg->operation) != SPI_OP_MODE_MASTER) {
		LOG_ERR("Slave mode is not supported on %s", dev->name);
		return -EINVAL;
	}

	if (spi_cfg->operation & SPI_MODE_LOOP) {
		LOG_ERR("Loopback mode is not supported");
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) &&
	    (spi_cfg->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
		LOG_ERR("Only single line mode is supported");
		return -EINVAL;
	}

	if (SPI_WORD_SIZE_GET(spi_cfg->operation) != 8) {
		LOG_ERR("Word sizes other than 8 bits are not supported");
		return -EINVAL;
	}

	if (spi_cfg->frequency < 125000) {
		LOG_ERR("Frequencies lower than 125 kHz are not supported");
		return -EINVAL;
	}

	config = dev_config->def_config;

	config.frequency = get_nrf_spi_frequency(spi_cfg->frequency);
	config.mode = get_nrf_spi_mode(spi_cfg->operation);
	config.bit_order = get_nrf_spi_bit_order(spi_cfg->operation);

	sck_pin = nrf_spi_sck_pin_get(dev_config->spi.p_reg);

	if (sck_pin != NRF_SPI_PIN_NOT_CONNECTED) {
		nrf_gpio_pin_write(sck_pin, spi_cfg->operation & SPI_MODE_CPOL ? 1 : 0);
	}

	if (dev_data->initialized) {
		nrfx_spi_uninit(&dev_config->spi);
		dev_data->initialized = false;
	}

	result = nrfx_spi_init(&dev_config->spi, &config,
			       event_handler, dev_data);
	if (result != NRFX_SUCCESS) {
		LOG_ERR("Failed to initialize nrfx driver: %08x", result);
		return -EIO;
	}

	dev_data->initialized = true;

	ctx->config = spi_cfg;

	return 0;
}

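/* Complete the current transaction towards the SPI framework and mark
 * the bus as no longer busy.
 */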
static void finish_transaction(const struct device *dev, int error)
{
	struct spi_nrfx_data *dev_data = dev->data;
	struct spi_context *ctx = &dev_data->ctx;

	LOG_DBG("Transaction finished with status %d", error);

	spi_context_complete(ctx, dev, error);
	dev_data->busy = false;
}

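/* Start the next continuous chunk of the current transfer. When no data
 * is left, or the nrfx transfer cannot be started, the transaction is
 * finished (with -EIO in the latter case).
 */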
static void transfer_next_chunk(const struct device *dev)
{
	const struct spi_nrfx_config *dev_config = dev->config;
	struct spi_nrfx_data *dev_data = dev->data;
	struct spi_context *ctx = &dev_data->ctx;
	int error = 0;

	size_t chunk_len = spi_context_max_continuous_chunk(ctx);

	if (chunk_len > 0) {
		nrfx_spi_xfer_desc_t xfer;
		nrfx_err_t result;

		dev_data->chunk_len = chunk_len;

		xfer.p_tx_buffer = ctx->tx_buf;
		xfer.tx_length = spi_context_tx_buf_on(ctx) ? chunk_len : 0;
		xfer.p_rx_buffer = ctx->rx_buf;
		xfer.rx_length = spi_context_rx_buf_on(ctx) ? chunk_len : 0;
		result = nrfx_spi_xfer(&dev_config->spi, &xfer, 0);
		if (result == NRFX_SUCCESS) {
			return;
		}

		error = -EIO;
	}

	finish_transaction(dev, error);
}

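/* nrfx SPI event handler, invoked in interrupt context when a chunk has
 * been transferred: advance the TX/RX buffers and start the next chunk,
 * or finish the transaction.
 */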
static void event_handler(const nrfx_spi_evt_t *p_event, void *p_context)
{
	struct spi_nrfx_data *dev_data = p_context;

	if (p_event->type == NRFX_SPI_EVENT_DONE) {
		/* Chunk length is set to 0 when a transaction is aborted
		 * due to a timeout.
		 */
		if (dev_data->chunk_len == 0) {
			finish_transaction(dev_data->dev, -ETIMEDOUT);
			return;
		}

		spi_context_update_tx(&dev_data->ctx, 1, dev_data->chunk_len);
		spi_context_update_rx(&dev_data->ctx, 1, dev_data->chunk_len);

		transfer_next_chunk(dev_data->dev);
	}
}

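/* Common synchronous/asynchronous transfer path: lock the context, apply
 * the configuration, optionally perform the WAKE handshake, assert CS,
 * transfer the buffers chunk by chunk and wait for completion. On timeout
 * the nrfx driver is deinitialized to abort the transfer.
 */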
static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	int error;

	spi_context_lock(&dev_data->ctx, asynchronous, cb, userdata, spi_cfg);

	error = configure(dev, spi_cfg);
	if (error == 0) {
		dev_data->busy = true;

		if (dev_config->wake_pin != WAKE_PIN_NOT_USED) {
			error = spi_nrfx_wake_request(&dev_config->wake_gpiote,
						      dev_config->wake_pin);
			if (error == -ETIMEDOUT) {
				LOG_WRN("Waiting for WAKE acknowledgment timed out");
				/* If a timeout occurs, try to perform the
				 * transfer anyway, just in case the slave
				 * device was unable to signal that it was
				 * already awake and prepared for the transfer.
				 */
			}
		}

		spi_context_buffers_setup(&dev_data->ctx, tx_bufs, rx_bufs, 1);
		spi_context_cs_control(&dev_data->ctx, true);

		transfer_next_chunk(dev);

		error = spi_context_wait_for_completion(&dev_data->ctx);
		if (error == -ETIMEDOUT) {
			/* Set the chunk length to 0 so that event_handler()
			 * knows that the transaction timed out and is to be
			 * aborted.
			 */
			dev_data->chunk_len = 0;
			/* Abort the current transfer by deinitializing
			 * the nrfx driver.
			 */
			nrfx_spi_uninit(&dev_config->spi);
			dev_data->initialized = false;

			/* Make sure the transaction is finished (it may
			 * already be finished if it actually did complete
			 * before the nrfx driver was deinitialized).
			 */
			finish_transaction(dev, -ETIMEDOUT);

			/* Clean up the driver state. */
			k_sem_reset(&dev_data->ctx.sync);
		}

		spi_context_cs_control(&dev_data->ctx, false);
	}

	spi_context_release(&dev_data->ctx, error);

	return error;
}

static int spi_nrfx_transceive(const struct device *dev,
			       const struct spi_config *spi_cfg,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_nrfx_transceive_async(const struct device *dev,
				     const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_nrfx_release(const struct device *dev,
			    const struct spi_config *spi_cfg)
{
	struct spi_nrfx_data *dev_data = dev->data;

	if (!spi_context_configured(&dev_data->ctx, spi_cfg)) {
		return -EINVAL;
	}

	if (dev_data->busy) {
		return -EBUSY;
	}

	spi_context_unlock_unconditionally(&dev_data->ctx);

	return 0;
}

static DEVICE_API(spi, spi_nrfx_driver_api) = {
	.transceive = spi_nrfx_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_nrfx_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = spi_nrfx_release,
};
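
/* Illustrative (not part of the driver): an application-side sketch of a
 * transfer that satisfies the constraints enforced in configure() above
 * (master mode, single line, 8-bit words, frequency >= 125 kHz). Device
 * and buffer names are hypothetical.
 *
 *	const struct device *spi_dev = DEVICE_DT_GET(DT_NODELABEL(spi1));
 *	struct spi_config cfg = {
 *		.frequency = 1000000,
 *		.operation = SPI_OP_MODE_MASTER | SPI_WORD_SET(8) |
 *			     SPI_TRANSFER_MSB,
 *	};
 *	uint8_t tx_data[2] = { 0x9F, 0x00 };
 *	uint8_t rx_data[2];
 *	struct spi_buf tx_buf = { .buf = tx_data, .len = sizeof(tx_data) };
 *	struct spi_buf rx_buf = { .buf = rx_data, .len = sizeof(rx_data) };
 *	struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };
 *	struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 };
 *
 *	int err = spi_transceive(spi_dev, &cfg, &tx, &rx);
 */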

#ifdef CONFIG_PM_DEVICE
static int spi_nrfx_pm_action(const struct device *dev,
			      enum pm_device_action action)
{
	int ret = 0;
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		ret = pinctrl_apply_state(dev_config->pcfg,
					  PINCTRL_STATE_DEFAULT);
		if (ret < 0) {
			return ret;
		}
		/* nrfx_spi_init() will be called at configuration before
		 * the next transfer.
		 */
		break;

	case PM_DEVICE_ACTION_SUSPEND:
		if (dev_data->initialized) {
			nrfx_spi_uninit(&dev_config->spi);
			dev_data->initialized = false;
		}

		ret = pinctrl_apply_state(dev_config->pcfg,
					  PINCTRL_STATE_SLEEP);
		if (ret < 0) {
			return ret;
		}
		break;

	default:
		ret = -ENOTSUP;
	}

	return ret;
}
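
/* Illustrative (assumption: CONFIG_PM_DEVICE=y and the instance is idle):
 * an application or power-management policy can trigger the actions above
 * explicitly, e.g.
 *
 *	pm_device_action_run(spi_dev, PM_DEVICE_ACTION_SUSPEND);
 *
 * which uninitializes the nrfx driver and switches the pins to their sleep
 * state; PM_DEVICE_ACTION_RESUME restores the default pin state.
 */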
#endif /* CONFIG_PM_DEVICE */

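/* Driver init: apply the default pin state, set up the optional WAKE line,
 * connect the instance IRQ, configure any CS GPIOs and release the context
 * lock so the first transfer can proceed.
 */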
static int spi_nrfx_init(const struct device *dev)
{
	const struct spi_nrfx_config *dev_config = dev->config;
	struct spi_nrfx_data *dev_data = dev->data;
	int err;

	err = pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		return err;
	}

	if (dev_config->wake_pin != WAKE_PIN_NOT_USED) {
		err = spi_nrfx_wake_init(&dev_config->wake_gpiote, dev_config->wake_pin);
		if (err == -ENODEV) {
			LOG_ERR("Failed to allocate GPIOTE channel for WAKE");
			return err;
		}
		if (err == -EIO) {
			LOG_ERR("Failed to configure WAKE pin");
			return err;
		}
	}

	dev_config->irq_connect();

	err = spi_context_cs_configure_all(&dev_data->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&dev_data->ctx);

	return 0;
}

/*
 * Current factors requiring use of DT_NODELABEL:
 *
 * - HAL design (requirement of drv_inst_idx in nrfx_spi_t)
 * - Name-based HAL IRQ handlers, e.g. nrfx_spi_0_irq_handler
 */

#define SPI(idx)		DT_NODELABEL(spi##idx)
#define SPI_PROP(idx, prop)	DT_PROP(SPI(idx), prop)

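/* SPI_NRFX_SPI_DEFINE(idx) instantiates the driver for the spi<idx>
 * devicetree node: it connects the IRQ, defines the data and config
 * structures, the pinctrl and PM entries, and registers the SPI device.
 */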
#define SPI_NRFX_SPI_DEFINE(idx) \
	NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(SPI(idx)); \
	static void irq_connect##idx(void) \
	{ \
		IRQ_CONNECT(DT_IRQN(SPI(idx)), DT_IRQ(SPI(idx), priority), \
			    nrfx_isr, nrfx_spi_##idx##_irq_handler, 0); \
	} \
	static struct spi_nrfx_data spi_##idx##_data = { \
		SPI_CONTEXT_INIT_LOCK(spi_##idx##_data, ctx), \
		SPI_CONTEXT_INIT_SYNC(spi_##idx##_data, ctx), \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(SPI(idx), ctx) \
		.dev = DEVICE_DT_GET(SPI(idx)), \
		.busy = false, \
	}; \
	PINCTRL_DT_DEFINE(SPI(idx)); \
	static const struct spi_nrfx_config spi_##idx##z_config = { \
		.spi = { \
			.p_reg = (NRF_SPI_Type *)DT_REG_ADDR(SPI(idx)), \
			.drv_inst_idx = NRFX_SPI##idx##_INST_IDX, \
		}, \
		.def_config = { \
			.skip_gpio_cfg = true, \
			.skip_psel_cfg = true, \
			.ss_pin = NRFX_SPI_PIN_NOT_USED, \
			.orc = SPI_PROP(idx, overrun_character), \
		}, \
		.irq_connect = irq_connect##idx, \
		.pcfg = PINCTRL_DT_DEV_CONFIG_GET(SPI(idx)), \
		.wake_pin = NRF_DT_GPIOS_TO_PSEL_OR(SPI(idx), wake_gpios, \
						    WAKE_PIN_NOT_USED), \
		.wake_gpiote = WAKE_GPIOTE_INSTANCE(SPI(idx)), \
	}; \
	BUILD_ASSERT(!DT_NODE_HAS_PROP(SPI(idx), wake_gpios) || \
		     !(DT_GPIO_FLAGS(SPI(idx), wake_gpios) & GPIO_ACTIVE_LOW), \
		     "WAKE line must be configured as active high"); \
	PM_DEVICE_DT_DEFINE(SPI(idx), spi_nrfx_pm_action); \
	SPI_DEVICE_DT_DEFINE(SPI(idx), \
			     spi_nrfx_init, \
			     PM_DEVICE_DT_GET(SPI(idx)), \
			     &spi_##idx##_data, \
			     &spi_##idx##z_config, \
			     POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \
			     &spi_nrfx_driver_api)

#ifdef CONFIG_HAS_HW_NRF_SPI0
SPI_NRFX_SPI_DEFINE(0);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPI1
SPI_NRFX_SPI_DEFINE(1);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPI2
SPI_NRFX_SPI_DEFINE(2);
#endif