/*
 * Copyright (c) 2018, Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/gpio.h>
#include <soc.h>
#include <nrfx_spis.h>

#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(spi_nrfx_spis, CONFIG_SPI_LOG_LEVEL);

#include "spi_context.h"

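/*
 * Per-instance run-time data. The wake_sem and wake_cb_data members are used
 * only when the optional WAKE line (wake-gpios devicetree property) is
 * configured for a given instance.
 */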
struct spi_nrfx_data {
        struct spi_context ctx;
        const struct device *dev;
        struct k_sem wake_sem;
        struct gpio_callback wake_cb_data;
};

struct spi_nrfx_config {
        nrfx_spis_t spis;
        nrfx_spis_config_t config;
        void (*irq_connect)(void);
        uint16_t max_buf_len;
        const struct pinctrl_dev_config *pcfg;
        struct gpio_dt_spec wake_gpio;
};

static inline nrf_spis_mode_t get_nrf_spis_mode(uint16_t operation)
{
        if (SPI_MODE_GET(operation) & SPI_MODE_CPOL) {
                if (SPI_MODE_GET(operation) & SPI_MODE_CPHA) {
                        return NRF_SPIS_MODE_3;
                } else {
                        return NRF_SPIS_MODE_2;
                }
        } else {
                if (SPI_MODE_GET(operation) & SPI_MODE_CPHA) {
                        return NRF_SPIS_MODE_1;
                } else {
                        return NRF_SPIS_MODE_0;
                }
        }
}

static inline nrf_spis_bit_order_t get_nrf_spis_bit_order(uint16_t operation)
{
        if (operation & SPI_TRANSFER_LSB) {
                return NRF_SPIS_BIT_ORDER_LSB_FIRST;
        } else {
                return NRF_SPIS_BIT_ORDER_MSB_FIRST;
        }
}

static int configure(const struct device *dev,
                     const struct spi_config *spi_cfg)
{
        const struct spi_nrfx_config *dev_config = dev->config;
        struct spi_nrfx_data *dev_data = dev->data;
        struct spi_context *ctx = &dev_data->ctx;

        if (spi_context_configured(ctx, spi_cfg)) {
                /* Already configured. No need to do it again. */
                return 0;
        }

        if (spi_cfg->operation & SPI_HALF_DUPLEX) {
                LOG_ERR("Half-duplex not supported");
                return -ENOTSUP;
        }

        if (SPI_OP_MODE_GET(spi_cfg->operation) == SPI_OP_MODE_MASTER) {
                LOG_ERR("Master mode is not supported on %s", dev->name);
                return -EINVAL;
        }

        if (spi_cfg->operation & SPI_MODE_LOOP) {
                LOG_ERR("Loopback mode is not supported");
                return -EINVAL;
        }

        if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) &&
            (spi_cfg->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
                LOG_ERR("Only single line mode is supported");
                return -EINVAL;
        }

        if (SPI_WORD_SIZE_GET(spi_cfg->operation) != 8) {
                LOG_ERR("Word sizes other than 8 bits are not supported");
                return -EINVAL;
        }

        if (spi_cs_is_gpio(spi_cfg)) {
                LOG_ERR("CS control via GPIO is not supported");
                return -EINVAL;
        }

        ctx->config = spi_cfg;

        nrf_spis_configure(dev_config->spis.p_reg,
                           get_nrf_spis_mode(spi_cfg->operation),
                           get_nrf_spis_bit_order(spi_cfg->operation));

        return 0;
}

static int prepare_for_transfer(const struct device *dev,
                                const uint8_t *tx_buf, size_t tx_buf_len,
                                uint8_t *rx_buf, size_t rx_buf_len)
{
        const struct spi_nrfx_config *dev_config = dev->config;
        nrfx_err_t result;

        if (tx_buf_len > dev_config->max_buf_len ||
            rx_buf_len > dev_config->max_buf_len) {
                LOG_ERR("Invalid buffer sizes: Tx %d/Rx %d",
                        tx_buf_len, rx_buf_len);
                return -EINVAL;
        }

        result = nrfx_spis_buffers_set(&dev_config->spis,
                                       tx_buf, tx_buf_len,
                                       rx_buf, rx_buf_len);
        if (result != NRFX_SUCCESS) {
                return -EIO;
        }

        return 0;
}

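/*
 * Callback invoked by the GPIO driver when the WAKE line goes high. It
 * disables the level interrupt again (it is re-armed on demand in
 * wait_for_wake()) and wakes up the thread blocked on wake_sem.
 */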
static void wake_callback(const struct device *dev, struct gpio_callback *cb,
                          uint32_t pins)
{
        struct spi_nrfx_data *dev_data =
                CONTAINER_OF(cb, struct spi_nrfx_data, wake_cb_data);
        const struct spi_nrfx_config *dev_config = dev_data->dev->config;

        (void)gpio_pin_interrupt_configure_dt(&dev_config->wake_gpio,
                                              GPIO_INT_DISABLE);
        k_sem_give(&dev_data->wake_sem);
}

static void wait_for_wake(struct spi_nrfx_data *dev_data,
                          const struct spi_nrfx_config *dev_config)
{
        /* If the WAKE line is low, wait until it goes high - this is a signal
         * from the master that it wants to perform a transfer.
         */
        if (gpio_pin_get_raw(dev_config->wake_gpio.port,
                             dev_config->wake_gpio.pin) == 0) {
                (void)gpio_pin_interrupt_configure_dt(&dev_config->wake_gpio,
                                                      GPIO_INT_LEVEL_HIGH);
                (void)k_sem_take(&dev_data->wake_sem, K_FOREVER);
        }
}

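/*
 * Common part of the synchronous and asynchronous transfer paths. In slave
 * mode the transfer itself is clocked entirely by the master; this function
 * validates the configuration, hands the TX/RX buffers to the SPIS peripheral,
 * optionally performs the WAKE line handshake, and then waits for (or, in the
 * asynchronous case, is notified about) the transfer-done event reported by
 * nrfx. On success, the number of bytes received from the master is returned.
 */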
static int transceive(const struct device *dev,
                      const struct spi_config *spi_cfg,
                      const struct spi_buf_set *tx_bufs,
                      const struct spi_buf_set *rx_bufs,
                      bool asynchronous,
                      spi_callback_t cb,
                      void *userdata)
{
        struct spi_nrfx_data *dev_data = dev->data;
        const struct spi_nrfx_config *dev_config = dev->config;
        const struct spi_buf *tx_buf = tx_bufs ? tx_bufs->buffers : NULL;
        const struct spi_buf *rx_buf = rx_bufs ? rx_bufs->buffers : NULL;
        int error;

        spi_context_lock(&dev_data->ctx, asynchronous, cb, userdata, spi_cfg);

        error = configure(dev, spi_cfg);
        if (error != 0) {
                /* Invalid configuration. */
        } else if ((tx_bufs && tx_bufs->count > 1) ||
                   (rx_bufs && rx_bufs->count > 1)) {
                LOG_ERR("Scattered buffers are not supported");
                error = -ENOTSUP;
        } else if (tx_buf && tx_buf->len && !nrfx_is_in_ram(tx_buf->buf)) {
                LOG_ERR("Only buffers located in RAM are supported");
                error = -ENOTSUP;
        } else {
                if (dev_config->wake_gpio.port) {
                        wait_for_wake(dev_data, dev_config);

                        nrf_spis_enable(dev_config->spis.p_reg);
                }

                error = prepare_for_transfer(dev,
                                             tx_buf ? tx_buf->buf : NULL,
                                             tx_buf ? tx_buf->len : 0,
                                             rx_buf ? rx_buf->buf : NULL,
                                             rx_buf ? rx_buf->len : 0);
                if (error == 0) {
                        if (dev_config->wake_gpio.port) {
                                /* Set the WAKE line low (tie it to ground)
                                 * to signal readiness to handle the transfer.
                                 */
                                gpio_pin_set_raw(dev_config->wake_gpio.port,
                                                 dev_config->wake_gpio.pin,
                                                 0);
                                /* Set the WAKE line back high (i.e. disconnect
                                 * output for its pin since it's configured in
                                 * open drain mode) so that it can be controlled
                                 * by the other side again.
                                 */
                                gpio_pin_set_raw(dev_config->wake_gpio.port,
                                                 dev_config->wake_gpio.pin,
                                                 1);
                        }

                        error = spi_context_wait_for_completion(&dev_data->ctx);
                }

                if (dev_config->wake_gpio.port) {
                        nrf_spis_disable(dev_config->spis.p_reg);
                }
        }

        spi_context_release(&dev_data->ctx, error);

        return error;
}

static int spi_nrfx_transceive(const struct device *dev,
                               const struct spi_config *spi_cfg,
                               const struct spi_buf_set *tx_bufs,
                               const struct spi_buf_set *rx_bufs)
{
        return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_nrfx_transceive_async(const struct device *dev,
                                     const struct spi_config *spi_cfg,
                                     const struct spi_buf_set *tx_bufs,
                                     const struct spi_buf_set *rx_bufs,
                                     spi_callback_t cb,
                                     void *userdata)
{
        return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_nrfx_release(const struct device *dev,
                            const struct spi_config *spi_cfg)
{
        struct spi_nrfx_data *dev_data = dev->data;

        if (!spi_context_configured(&dev_data->ctx, spi_cfg)) {
                return -EINVAL;
        }

        spi_context_unlock_unconditionally(&dev_data->ctx);

        return 0;
}

static DEVICE_API(spi, spi_nrfx_driver_api) = {
        .transceive = spi_nrfx_transceive,
#ifdef CONFIG_SPI_ASYNC
        .transceive_async = spi_nrfx_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
        .iodev_submit = spi_rtio_iodev_default_submit,
#endif
        .release = spi_nrfx_release,
};
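/*
 * Application-side usage sketch (illustrative only; the "spi1" node label,
 * buffer sizes and data values below are placeholders, not something this
 * driver mandates):
 *
 *      const struct device *spis_dev = DEVICE_DT_GET(DT_NODELABEL(spi1));
 *      static const struct spi_config cfg = {
 *              .operation = SPI_OP_MODE_SLAVE | SPI_WORD_SET(8) |
 *                           SPI_TRANSFER_MSB,
 *      };
 *      uint8_t tx_data[4] = { 0x01, 0x02, 0x03, 0x04 };
 *      uint8_t rx_data[4];
 *      const struct spi_buf tx_buf = { .buf = tx_data, .len = sizeof(tx_data) };
 *      const struct spi_buf rx_buf = { .buf = rx_data, .len = sizeof(rx_data) };
 *      const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };
 *      const struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 };
 *
 *      int ret = spi_transceive(spis_dev, &cfg, &tx, &rx);
 *
 * On success, ret is the number of bytes received from the master; on failure
 * it is a negative errno value (e.g. -ENOTSUP for scattered buffer sets).
 */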

static void event_handler(const nrfx_spis_evt_t *p_event, void *p_context)
{
        struct spi_nrfx_data *dev_data = p_context;

        if (p_event->evt_type == NRFX_SPIS_XFER_DONE) {
                spi_context_complete(&dev_data->ctx, dev_data->dev,
                                     p_event->rx_amount);
        }
}

static int spi_nrfx_init(const struct device *dev)
{
        const struct spi_nrfx_config *dev_config = dev->config;
        struct spi_nrfx_data *dev_data = dev->data;
        nrfx_err_t result;
        int err;

        err = pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_DEFAULT);
        if (err < 0) {
                return err;
        }

        /* This sets only default values of mode and bit order. The ones to be
         * actually used are set in configure() when a transfer is prepared.
         */
        result = nrfx_spis_init(&dev_config->spis, &dev_config->config,
                                event_handler, dev_data);

        if (result != NRFX_SUCCESS) {
                LOG_ERR("Failed to initialize device: %s", dev->name);
                return -EBUSY;
        }

        /* Hook up the interrupt handler generated for this instance; without
         * this call, SPIS events from nrfx would never reach event_handler().
         */
        dev_config->irq_connect();

        if (dev_config->wake_gpio.port) {
                if (!gpio_is_ready_dt(&dev_config->wake_gpio)) {
                        return -ENODEV;
                }

                /* In open drain mode, the output is disconnected when set to
                 * the high state, so the following will effectively configure
                 * the pin as an input only.
                 */
                err = gpio_pin_configure_dt(&dev_config->wake_gpio,
                                            GPIO_INPUT |
                                            GPIO_OUTPUT_HIGH |
                                            GPIO_OPEN_DRAIN);
                if (err < 0) {
                        return err;
                }

                gpio_init_callback(&dev_data->wake_cb_data, wake_callback,
                                   BIT(dev_config->wake_gpio.pin));
                err = gpio_add_callback(dev_config->wake_gpio.port,
                                        &dev_data->wake_cb_data);
                if (err < 0) {
                        return err;
                }

                /* When the WAKE line is used, the SPIS peripheral is enabled
                 * only after the master signals that it wants to perform a
                 * transfer, and it is disabled right after the transfer is
                 * done. Waiting for the WAKE line to go high, which can be
                 * done using the GPIO PORT event, instead of just waiting for
                 * the transfer with the SPIS peripheral enabled, significantly
                 * reduces idle power consumption.
                 */
                nrf_spis_disable(dev_config->spis.p_reg);
        }

        spi_context_unlock_unconditionally(&dev_data->ctx);

        return 0;
}

/*
 * Current factors requiring use of DT_NODELABEL:
 *
 * - HAL design (requirement of drv_inst_idx in nrfx_spis_t)
 * - Name-based HAL IRQ handlers, e.g. nrfx_spis_0_irq_handler
 */

#define SPIS(idx)            DT_NODELABEL(spi##idx)
#define SPIS_PROP(idx, prop) DT_PROP(SPIS(idx), prop)

#define SPI_NRFX_SPIS_DEFINE(idx)                                             \
        static void irq_connect##idx(void)                                    \
        {                                                                     \
                IRQ_CONNECT(DT_IRQN(SPIS(idx)), DT_IRQ(SPIS(idx), priority),  \
                            nrfx_isr, nrfx_spis_##idx##_irq_handler, 0);      \
        }                                                                     \
        static struct spi_nrfx_data spi_##idx##_data = {                      \
                SPI_CONTEXT_INIT_LOCK(spi_##idx##_data, ctx),                 \
                SPI_CONTEXT_INIT_SYNC(spi_##idx##_data, ctx),                 \
                .dev = DEVICE_DT_GET(SPIS(idx)),                              \
                .wake_sem = Z_SEM_INITIALIZER(                                \
                        spi_##idx##_data.wake_sem, 0, 1),                     \
        };                                                                    \
        PINCTRL_DT_DEFINE(SPIS(idx));                                         \
        static const struct spi_nrfx_config spi_##idx##z_config = {           \
                .spis = {                                                     \
                        .p_reg = (NRF_SPIS_Type *)DT_REG_ADDR(SPIS(idx)),     \
                        .drv_inst_idx = NRFX_SPIS##idx##_INST_IDX,            \
                },                                                            \
                .config = {                                                   \
                        .skip_gpio_cfg = true,                                \
                        .skip_psel_cfg = true,                                \
                        .mode = NRF_SPIS_MODE_0,                              \
                        .bit_order = NRF_SPIS_BIT_ORDER_MSB_FIRST,            \
                        .orc = SPIS_PROP(idx, overrun_character),             \
                        .def = SPIS_PROP(idx, def_char),                      \
                },                                                            \
                .irq_connect = irq_connect##idx,                              \
                .pcfg = PINCTRL_DT_DEV_CONFIG_GET(SPIS(idx)),                 \
                .max_buf_len = BIT_MASK(SPIS_PROP(idx, easydma_maxcnt_bits)), \
                .wake_gpio = GPIO_DT_SPEC_GET_OR(SPIS(idx), wake_gpios, {0}), \
        };                                                                    \
        BUILD_ASSERT(!DT_NODE_HAS_PROP(SPIS(idx), wake_gpios) ||              \
                     !(DT_GPIO_FLAGS(SPIS(idx), wake_gpios) & GPIO_ACTIVE_LOW),\
                     "WAKE line must be configured as active high");          \
        SPI_DEVICE_DT_DEFINE(SPIS(idx),                                       \
                             spi_nrfx_init,                                   \
                             NULL,                                            \
                             &spi_##idx##_data,                               \
                             &spi_##idx##z_config,                            \
                             POST_KERNEL,                                     \
                             CONFIG_SPI_INIT_PRIORITY,                        \
                             &spi_nrfx_driver_api)

/* Creates a device instance if it is enabled in the devicetree. */
#define SPIS_DEVICE(periph, prefix, id, _)                                    \
        IF_ENABLED(CONFIG_HAS_HW_NRF_SPIS##prefix##id, (SPI_NRFX_SPIS_DEFINE(prefix##id);))

/* Iterates over the nrfx SPIS instances enabled in nrfx_config.h. */
NRFX_FOREACH_ENABLED(SPIS, SPIS_DEVICE, (), (), _)
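/*
 * For example, if the corresponding CONFIG_HAS_HW_NRF_SPIS* option is enabled
 * for instance 1, the iteration above expands to SPI_NRFX_SPIS_DEFINE(1),
 * which defines irq_connect1(), spi_1_data and spi_1z_config for the spi1
 * devicetree node and registers the device with nrfx_spis_1_irq_handler
 * servicing its interrupt.
 */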