/*
 * Copyright (c) 2017 - 2018, Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/pm/device.h>
#include <zephyr/drivers/pinctrl.h>
#include <soc.h>
#include <nrfx_spi.h>

#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(spi_nrfx_spi, CONFIG_SPI_LOG_LEVEL);

#include "spi_context.h"
#include "spi_nrfx_common.h"

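/* Run-time data for a single SPI instance. */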
struct spi_nrfx_data {
	struct spi_context ctx;
	const struct device *dev;
	size_t chunk_len;
	bool   busy;
	bool   initialized;
};

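/* Build-time configuration for a single SPI instance. */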
struct spi_nrfx_config {
	nrfx_spi_t	  spi;
	nrfx_spi_config_t def_config;
	void (*irq_connect)(void);
	const struct pinctrl_dev_config *pcfg;
	uint32_t wake_pin;
	nrfx_gpiote_t wake_gpiote;
};

static void event_handler(const nrfx_spi_evt_t *p_event, void *p_context);

static inline nrf_spi_frequency_t get_nrf_spi_frequency(uint32_t frequency)
{
	/* Get the highest supported frequency not exceeding the requested one.
	 */
	if (frequency < 250000) {
		return NRF_SPI_FREQ_125K;
	} else if (frequency < 500000) {
		return NRF_SPI_FREQ_250K;
	} else if (frequency < 1000000) {
		return NRF_SPI_FREQ_500K;
	} else if (frequency < 2000000) {
		return NRF_SPI_FREQ_1M;
	} else if (frequency < 4000000) {
		return NRF_SPI_FREQ_2M;
	} else if (frequency < 8000000) {
		return NRF_SPI_FREQ_4M;
	} else {
		return NRF_SPI_FREQ_8M;
	}
}

static inline nrf_spi_mode_t get_nrf_spi_mode(uint16_t operation)
{
	if (SPI_MODE_GET(operation) & SPI_MODE_CPOL) {
		if (SPI_MODE_GET(operation) & SPI_MODE_CPHA) {
			return NRF_SPI_MODE_3;
		} else {
			return NRF_SPI_MODE_2;
		}
	} else {
		if (SPI_MODE_GET(operation) & SPI_MODE_CPHA) {
			return NRF_SPI_MODE_1;
		} else {
			return NRF_SPI_MODE_0;
		}
	}
}

static inline nrf_spi_bit_order_t get_nrf_spi_bit_order(uint16_t operation)
{
	if (operation & SPI_TRANSFER_LSB) {
		return NRF_SPI_BIT_ORDER_LSB_FIRST;
	} else {
		return NRF_SPI_BIT_ORDER_MSB_FIRST;
	}
}

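/* Validate the requested spi_config and (re)initialize the nrfx SPI driver
 * with the corresponding settings. Returns 0 if the current configuration
 * can be reused or the reconfiguration succeeded.
 */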
static int configure(const struct device *dev,
		     const struct spi_config *spi_cfg)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	struct spi_context *ctx = &dev_data->ctx;
	nrfx_spi_config_t config;
	nrfx_err_t result;

	if (dev_data->initialized && spi_context_configured(ctx, spi_cfg)) {
		/* Already configured. No need to do it again. */
		return 0;
	}

	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	if (SPI_OP_MODE_GET(spi_cfg->operation) != SPI_OP_MODE_MASTER) {
		LOG_ERR("Slave mode is not supported on %s", dev->name);
		return -EINVAL;
	}

	if (spi_cfg->operation & SPI_MODE_LOOP) {
		LOG_ERR("Loopback mode is not supported");
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) &&
	    (spi_cfg->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
		LOG_ERR("Only single line mode is supported");
		return -EINVAL;
	}

	if (SPI_WORD_SIZE_GET(spi_cfg->operation) != 8) {
		LOG_ERR("Word sizes other than 8 bits are not supported");
		return -EINVAL;
	}

	if (spi_cfg->frequency < 125000) {
		LOG_ERR("Frequencies lower than 125 kHz are not supported");
		return -EINVAL;
	}

	config = dev_config->def_config;

	config.frequency = get_nrf_spi_frequency(spi_cfg->frequency);
	config.mode      = get_nrf_spi_mode(spi_cfg->operation);
	config.bit_order = get_nrf_spi_bit_order(spi_cfg->operation);

	nrf_gpio_pin_write(nrf_spi_sck_pin_get(dev_config->spi.p_reg),
			   spi_cfg->operation & SPI_MODE_CPOL ? 1 : 0);

	if (dev_data->initialized) {
		nrfx_spi_uninit(&dev_config->spi);
		dev_data->initialized = false;
	}

	result = nrfx_spi_init(&dev_config->spi, &config,
			       event_handler, dev_data);
	if (result != NRFX_SUCCESS) {
		LOG_ERR("Failed to initialize nrfx driver: %08x", result);
		return -EIO;
	}

	dev_data->initialized = true;

	ctx->config = spi_cfg;

	return 0;
}

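/* Report completion of the current transaction to the SPI context and mark
 * the instance as no longer busy.
 */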
static void finish_transaction(const struct device *dev, int error)
{
	struct spi_nrfx_data *dev_data = dev->data;
	struct spi_context *ctx = &dev_data->ctx;

	LOG_DBG("Transaction finished with status %d", error);

	spi_context_complete(ctx, dev, error);
	dev_data->busy = false;
}

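/* Start the transfer of the next continuous chunk of the TX/RX buffers,
 * or finish the transaction when nothing is left to transfer or the nrfx
 * transfer request fails.
 */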
static void transfer_next_chunk(const struct device *dev)
{
	const struct spi_nrfx_config *dev_config = dev->config;
	struct spi_nrfx_data *dev_data = dev->data;
	struct spi_context *ctx = &dev_data->ctx;
	int error = 0;

	size_t chunk_len = spi_context_max_continuous_chunk(ctx);

	if (chunk_len > 0) {
		nrfx_spi_xfer_desc_t xfer;
		nrfx_err_t result;

		dev_data->chunk_len = chunk_len;

		xfer.p_tx_buffer = ctx->tx_buf;
		xfer.tx_length   = spi_context_tx_buf_on(ctx) ? chunk_len : 0;
		xfer.p_rx_buffer = ctx->rx_buf;
		xfer.rx_length   = spi_context_rx_buf_on(ctx) ? chunk_len : 0;
		result = nrfx_spi_xfer(&dev_config->spi, &xfer, 0);
		if (result == NRFX_SUCCESS) {
			return;
		}

		error = -EIO;
	}

	finish_transaction(dev, error);
}

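/* nrfx SPI event handler, called when a transfer of one chunk is done. */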
static void event_handler(const nrfx_spi_evt_t *p_event, void *p_context)
{
	struct spi_nrfx_data *dev_data = p_context;

	if (p_event->type == NRFX_SPI_EVENT_DONE) {
		/* Chunk length is set to 0 when a transaction is aborted
		 * due to a timeout.
		 */
		if (dev_data->chunk_len == 0) {
			finish_transaction(dev_data->dev, -ETIMEDOUT);
			return;
		}

		spi_context_update_tx(&dev_data->ctx, 1, dev_data->chunk_len);
		spi_context_update_rx(&dev_data->ctx, 1, dev_data->chunk_len);

		transfer_next_chunk(dev_data->dev);
	}
}

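/* Common synchronous/asynchronous transceive path: configure the peripheral,
 * optionally assert the WAKE line, run the transfer chunk by chunk, and
 * handle completion or timeout.
 */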
static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	int error;

	spi_context_lock(&dev_data->ctx, asynchronous, cb, userdata, spi_cfg);

	error = configure(dev, spi_cfg);
	if (error == 0) {
		dev_data->busy = true;

		if (dev_config->wake_pin != WAKE_PIN_NOT_USED) {
			error = spi_nrfx_wake_request(&dev_config->wake_gpiote,
						      dev_config->wake_pin);
			if (error == -ETIMEDOUT) {
				LOG_WRN("Waiting for WAKE acknowledgment timed out");
				/* If a timeout occurs, try to perform the transfer
				 * anyway, just in case the slave device was unable
				 * to signal that it was already awake and prepared
				 * for the transfer.
				 */
			}
		}

		spi_context_buffers_setup(&dev_data->ctx, tx_bufs, rx_bufs, 1);
		spi_context_cs_control(&dev_data->ctx, true);

		transfer_next_chunk(dev);

		error = spi_context_wait_for_completion(&dev_data->ctx);
		if (error == -ETIMEDOUT) {
			/* Set the chunk length to 0 so that event_handler()
			 * knows that the transaction timed out and is to be
			 * aborted.
			 */
			dev_data->chunk_len = 0;
			/* Abort the current transfer by deinitializing
			 * the nrfx driver.
			 */
			nrfx_spi_uninit(&dev_config->spi);
			dev_data->initialized = false;

			/* Make sure the transaction is finished (it may be
			 * already finished if it actually did complete before
			 * the nrfx driver was deinitialized).
			 */
			finish_transaction(dev, -ETIMEDOUT);

			/* Clean up the driver state. */
			k_sem_reset(&dev_data->ctx.sync);
		}

		spi_context_cs_control(&dev_data->ctx, false);
	}

	spi_context_release(&dev_data->ctx, error);

	return error;
}

static int spi_nrfx_transceive(const struct device *dev,
			       const struct spi_config *spi_cfg,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_nrfx_transceive_async(const struct device *dev,
				     const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

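/* API release handler: unlock the SPI context unless a transfer is still
 * in progress.
 */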
static int spi_nrfx_release(const struct device *dev,
			    const struct spi_config *spi_cfg)
{
	struct spi_nrfx_data *dev_data = dev->data;

	if (!spi_context_configured(&dev_data->ctx, spi_cfg)) {
		return -EINVAL;
	}

	if (dev_data->busy) {
		return -EBUSY;
	}

	spi_context_unlock_unconditionally(&dev_data->ctx);

	return 0;
}

static DEVICE_API(spi, spi_nrfx_driver_api) = {
	.transceive = spi_nrfx_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_nrfx_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = spi_nrfx_release,
};

#ifdef CONFIG_PM_DEVICE
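/* Device power management handler: apply the default pin state on resume,
 * deinitialize the nrfx driver and apply the sleep pin state on suspend.
 */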
static int spi_nrfx_pm_action(const struct device *dev,
			      enum pm_device_action action)
{
	int ret = 0;
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		ret = pinctrl_apply_state(dev_config->pcfg,
					  PINCTRL_STATE_DEFAULT);
		if (ret < 0) {
			return ret;
		}
		/* nrfx_spi_init() will be called at configuration before
		 * the next transfer.
		 */
		break;

	case PM_DEVICE_ACTION_SUSPEND:
		if (dev_data->initialized) {
			nrfx_spi_uninit(&dev_config->spi);
			dev_data->initialized = false;
		}

		ret = pinctrl_apply_state(dev_config->pcfg,
					  PINCTRL_STATE_SLEEP);
		if (ret < 0) {
			return ret;
		}
		break;

	default:
		ret = -ENOTSUP;
	}

	return ret;
}
#endif /* CONFIG_PM_DEVICE */

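/* Instance init: apply the default pin state, set up the optional WAKE line,
 * connect the IRQ and configure all chip-select GPIOs.
 */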
static int spi_nrfx_init(const struct device *dev)
{
	const struct spi_nrfx_config *dev_config = dev->config;
	struct spi_nrfx_data *dev_data = dev->data;
	int err;

	err = pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		return err;
	}

	if (dev_config->wake_pin != WAKE_PIN_NOT_USED) {
		err = spi_nrfx_wake_init(&dev_config->wake_gpiote, dev_config->wake_pin);
		if (err == -ENODEV) {
			LOG_ERR("Failed to allocate GPIOTE channel for WAKE");
			return err;
		}
		if (err == -EIO) {
			LOG_ERR("Failed to configure WAKE pin");
			return err;
		}
	}

	dev_config->irq_connect();

	err = spi_context_cs_configure_all(&dev_data->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&dev_data->ctx);

	return 0;
}

/*
 * Current factors requiring use of DT_NODELABEL:
 *
 * - HAL design (requirement of drv_inst_idx in nrfx_spi_t)
 * - Name-based HAL IRQ handlers, e.g. nrfx_spi_0_irq_handler
 */

#define SPI(idx)			DT_NODELABEL(spi##idx)
#define SPI_PROP(idx, prop)		DT_PROP(SPI(idx), prop)

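/* Define one driver instance for the spi<idx> devicetree node. */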
#define SPI_NRFX_SPI_DEFINE(idx)					       \
	NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(SPI(idx));			       \
	static void irq_connect##idx(void)				       \
	{								       \
		IRQ_CONNECT(DT_IRQN(SPI(idx)), DT_IRQ(SPI(idx), priority),     \
			    nrfx_isr, nrfx_spi_##idx##_irq_handler, 0);	       \
	}								       \
	static struct spi_nrfx_data spi_##idx##_data = {		       \
		SPI_CONTEXT_INIT_LOCK(spi_##idx##_data, ctx),		       \
		SPI_CONTEXT_INIT_SYNC(spi_##idx##_data, ctx),		       \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(SPI(idx), ctx)		       \
		.dev  = DEVICE_DT_GET(SPI(idx)),			       \
		.busy = false,						       \
	};								       \
	PINCTRL_DT_DEFINE(SPI(idx));					       \
	static const struct spi_nrfx_config spi_##idx##z_config = {	       \
		.spi = {						       \
			.p_reg = (NRF_SPI_Type *)DT_REG_ADDR(SPI(idx)),	       \
			.drv_inst_idx = NRFX_SPI##idx##_INST_IDX,	       \
		},							       \
		.def_config = {						       \
			.skip_gpio_cfg = true,				       \
			.skip_psel_cfg = true,				       \
			.ss_pin = NRFX_SPI_PIN_NOT_USED,		       \
			.orc    = SPI_PROP(idx, overrun_character),	       \
		},							       \
		.irq_connect = irq_connect##idx,			       \
		.pcfg = PINCTRL_DT_DEV_CONFIG_GET(SPI(idx)),		       \
		.wake_pin = NRF_DT_GPIOS_TO_PSEL_OR(SPI(idx), wake_gpios,      \
						    WAKE_PIN_NOT_USED),	       \
		.wake_gpiote = WAKE_GPIOTE_INSTANCE(SPI(idx)),		       \
	};								       \
	BUILD_ASSERT(!DT_NODE_HAS_PROP(SPI(idx), wake_gpios) ||		       \
		     !(DT_GPIO_FLAGS(SPI(idx), wake_gpios) & GPIO_ACTIVE_LOW), \
		     "WAKE line must be configured as active high");	       \
	PM_DEVICE_DT_DEFINE(SPI(idx), spi_nrfx_pm_action);		       \
	SPI_DEVICE_DT_DEFINE(SPI(idx),					       \
		      spi_nrfx_init,					       \
		      PM_DEVICE_DT_GET(SPI(idx)),			       \
		      &spi_##idx##_data,				       \
		      &spi_##idx##z_config,				       \
		      POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,		       \
		      &spi_nrfx_driver_api)

#ifdef CONFIG_HAS_HW_NRF_SPI0
SPI_NRFX_SPI_DEFINE(0);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPI1
SPI_NRFX_SPI_DEFINE(1);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPI2
SPI_NRFX_SPI_DEFINE(2);
#endif