/*
 * Copyright (c) 2017 - 2018, Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/drivers/spi.h>
#include <zephyr/cache.h>
#include <zephyr/pm/device.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/mem_mgmt/mem_attr.h>
#include <soc.h>
#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
#include <nrfx_ppi.h>
#endif
#ifdef CONFIG_SOC_NRF5340_CPUAPP
#include <hal/nrf_clock.h>
#endif
#include <nrfx_spim.h>
#include <string.h>
#include <zephyr/linker/devicetree_regions.h>

#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(spi_nrfx_spim, CONFIG_SPI_LOG_LEVEL);

#include "spi_context.h"
#include "spi_nrfx_common.h"

#if defined(CONFIG_SOC_NRF52832) && !defined(CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58)
#error  This driver is not available by default for nRF52832 because of Product Anomaly 58 \
	(SPIM: An additional byte is clocked out when RXD.MAXCNT == 1 and TXD.MAXCNT <= 1). \
	Use CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58=y to override this limitation.
#endif

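/* When CONFIG_SPI_NRFX_RAM_BUFFER_SIZE is non-zero, transfers that use
 * buffers not accessible by the SPIM EasyDMA are bounced through
 * driver-owned RAM buffers of that size.
 */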
#if (CONFIG_SPI_NRFX_RAM_BUFFER_SIZE > 0)
#define SPI_BUFFER_IN_RAM 1
#endif

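/* Per-instance run-time data. */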
struct spi_nrfx_data {
	struct spi_context ctx;
	const struct device *dev;
	size_t  chunk_len;
	bool    busy;
	bool    initialized;
#ifdef SPI_BUFFER_IN_RAM
	uint8_t *tx_buffer;
	uint8_t *rx_buffer;
#endif
#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
	bool    anomaly_58_workaround_active;
	uint8_t ppi_ch;
	uint8_t gpiote_ch;
#endif
};

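/* Per-instance constant configuration, mostly derived from devicetree. */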
struct spi_nrfx_config {
	nrfx_spim_t	   spim;
	uint32_t	   max_freq;
	nrfx_spim_config_t def_config;
	void (*irq_connect)(void);
	uint16_t max_chunk_len;
	const struct pinctrl_dev_config *pcfg;
#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
	bool anomaly_58_workaround;
#endif
	uint32_t wake_pin;
	nrfx_gpiote_t wake_gpiote;
#ifdef CONFIG_DCACHE
	uint32_t mem_attr;
#endif
};

static void event_handler(const nrfx_spim_evt_t *p_event, void *p_context);

static inline void finalize_spi_transaction(const struct device *dev, bool deactivate_cs)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	void *reg = dev_config->spim.p_reg;

	if (deactivate_cs) {
		spi_context_cs_control(&dev_data->ctx, false);
	}

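	/* High-speed (320 MHz clocked) SPIM instances are enabled only for
	 * the duration of a transfer (see transceive()); keep such an
	 * instance enabled here only when the CS line is to be held active.
	 */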
	if (NRF_SPIM_IS_320MHZ_SPIM(reg) && !(dev_data->ctx.config->operation & SPI_HOLD_ON_CS)) {
		nrfy_spim_disable(reg);
	}
}

static inline uint32_t get_nrf_spim_frequency(uint32_t frequency)
{
	/* Get the highest supported frequency not exceeding the requested one.
	 */
	if (frequency >= MHZ(32) && (NRF_SPIM_HAS_32_MHZ_FREQ || NRF_SPIM_HAS_PRESCALER)) {
		return MHZ(32);
	} else if (frequency >= MHZ(16) && (NRF_SPIM_HAS_16_MHZ_FREQ || NRF_SPIM_HAS_PRESCALER)) {
		return MHZ(16);
	} else if (frequency >= MHZ(8)) {
		return MHZ(8);
	} else if (frequency >= MHZ(4)) {
		return MHZ(4);
	} else if (frequency >= MHZ(2)) {
		return MHZ(2);
	} else if (frequency >= MHZ(1)) {
		return MHZ(1);
	} else if (frequency >= KHZ(500)) {
		return KHZ(500);
	} else if (frequency >= KHZ(250)) {
		return KHZ(250);
	} else {
		return KHZ(125);
	}
}

static inline nrf_spim_mode_t get_nrf_spim_mode(uint16_t operation)
{
	if (SPI_MODE_GET(operation) & SPI_MODE_CPOL) {
		if (SPI_MODE_GET(operation) & SPI_MODE_CPHA) {
			return NRF_SPIM_MODE_3;
		} else {
			return NRF_SPIM_MODE_2;
		}
	} else {
		if (SPI_MODE_GET(operation) & SPI_MODE_CPHA) {
			return NRF_SPIM_MODE_1;
		} else {
			return NRF_SPIM_MODE_0;
		}
	}
}

static inline nrf_spim_bit_order_t get_nrf_spim_bit_order(uint16_t operation)
{
	if (operation & SPI_TRANSFER_LSB) {
		return NRF_SPIM_BIT_ORDER_LSB_FIRST;
	} else {
		return NRF_SPIM_BIT_ORDER_MSB_FIRST;
	}
}

static int configure(const struct device *dev,
		     const struct spi_config *spi_cfg)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	struct spi_context *ctx = &dev_data->ctx;
	uint32_t max_freq = dev_config->max_freq;
	nrfx_spim_config_t config;
	nrfx_err_t result;

	if (dev_data->initialized && spi_context_configured(ctx, spi_cfg)) {
		/* Already configured. No need to do it again. */
		return 0;
	}

	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	if (SPI_OP_MODE_GET(spi_cfg->operation) != SPI_OP_MODE_MASTER) {
		LOG_ERR("Slave mode is not supported on %s", dev->name);
		return -EINVAL;
	}

	if (spi_cfg->operation & SPI_MODE_LOOP) {
		LOG_ERR("Loopback mode is not supported");
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) &&
	    (spi_cfg->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
		LOG_ERR("Only single line mode is supported");
		return -EINVAL;
	}

	if (SPI_WORD_SIZE_GET(spi_cfg->operation) != 8) {
		LOG_ERR("Word sizes other than 8 bits are not supported");
		return -EINVAL;
	}

	if (spi_cfg->frequency < 125000) {
		LOG_ERR("Frequencies lower than 125 kHz are not supported");
		return -EINVAL;
	}

#if defined(CONFIG_SOC_NRF5340_CPUAPP)
	/* On nRF5340, the 32 Mbps speed is supported by the application core
	 * when it is running at 128 MHz (see the Timing specifications section
	 * in the nRF5340 PS).
	 */
	if (max_freq > 16000000 &&
	    nrf_clock_hfclk_div_get(NRF_CLOCK) != NRF_CLOCK_HFCLK_DIV_1) {
		max_freq = 16000000;
	}
#endif

	config = dev_config->def_config;

	/* Limit the frequency to that supported by the SPIM instance. */
	config.frequency = get_nrf_spim_frequency(MIN(spi_cfg->frequency,
						      max_freq));
	config.mode      = get_nrf_spim_mode(spi_cfg->operation);
	config.bit_order = get_nrf_spim_bit_order(spi_cfg->operation);

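	/* Drive SCK to its idle level (as implied by CPOL) before the
	 * peripheral is (re)initialized with the new configuration.
	 */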
	nrfy_gpio_pin_write(nrfy_spim_sck_pin_get(dev_config->spim.p_reg),
			    spi_cfg->operation & SPI_MODE_CPOL ? 1 : 0);

	if (dev_data->initialized) {
		nrfx_spim_uninit(&dev_config->spim);
		dev_data->initialized = false;
	}

	result = nrfx_spim_init(&dev_config->spim, &config,
				event_handler, (void *)dev);
	if (result != NRFX_SUCCESS) {
		LOG_ERR("Failed to initialize nrfx driver: %08x", result);
		return -EIO;
	}

	dev_data->initialized = true;

	ctx->config = spi_cfg;

	return 0;
}

#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
static const nrfx_gpiote_t gpiote = NRFX_GPIOTE_INSTANCE(0);

/*
 * Brief Workaround for transmitting 1 byte with SPIM.
 *
 * Derived from the setup_workaround_for_ftpan_58() function from
 * the nRF52832 Rev 1 Errata v1.6 document anomaly 58 workaround.
 *
 * Warning Must not be used when transmitting multiple bytes.
 *
 * Warning After this workaround is used, the user must reset the PPI
 * channel and the GPIOTE channel before attempting to transmit multiple
 * bytes.
 */
static void anomaly_58_workaround_setup(const struct device *dev)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	NRF_SPIM_Type *spim = dev_config->spim.p_reg;
	uint32_t ppi_ch = dev_data->ppi_ch;
	uint32_t gpiote_ch = dev_data->gpiote_ch;
	uint32_t eep = (uint32_t)&gpiote.p_reg->EVENTS_IN[gpiote_ch];
	uint32_t tep = (uint32_t)&spim->TASKS_STOP;

	dev_data->anomaly_58_workaround_active = true;

	/* Create an event when SCK toggles */
	nrf_gpiote_event_configure(gpiote.p_reg, gpiote_ch, spim->PSEL.SCK,
				   GPIOTE_CONFIG_POLARITY_Toggle);
	nrf_gpiote_event_enable(gpiote.p_reg, gpiote_ch);

	/* Stop the spim instance when SCK toggles */
	nrf_ppi_channel_endpoint_setup(NRF_PPI, ppi_ch, eep, tep);
	nrf_ppi_channel_enable(NRF_PPI, ppi_ch);

	/* The spim instance cannot be stopped mid-byte, so it will finish
	 * transmitting the first byte and then stop. Effectively ensuring
	 * that only 1 byte is transmitted.
	 */
}

static void anomaly_58_workaround_clear(struct spi_nrfx_data *dev_data)
{
	uint32_t ppi_ch = dev_data->ppi_ch;
	uint32_t gpiote_ch = dev_data->gpiote_ch;

	if (dev_data->anomaly_58_workaround_active) {
		nrf_ppi_channel_disable(NRF_PPI, ppi_ch);
		nrf_gpiote_task_disable(gpiote.p_reg, gpiote_ch);

		dev_data->anomaly_58_workaround_active = false;
	}
}

static int anomaly_58_workaround_init(const struct device *dev)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	nrfx_err_t err_code;

	dev_data->anomaly_58_workaround_active = false;

	if (dev_config->anomaly_58_workaround) {
		err_code = nrfx_ppi_channel_alloc(&dev_data->ppi_ch);
		if (err_code != NRFX_SUCCESS) {
			LOG_ERR("Failed to allocate PPI channel");
			return -ENODEV;
		}

		err_code = nrfx_gpiote_channel_alloc(&gpiote, &dev_data->gpiote_ch);
		if (err_code != NRFX_SUCCESS) {
			LOG_ERR("Failed to allocate GPIOTE channel");
			return -ENODEV;
		}
		LOG_DBG("PAN 58 workaround enabled for %s: ppi %u, gpiote %u",
			dev->name, dev_data->ppi_ch, dev_data->gpiote_ch);
	}

	return 0;
}
#endif

static void finish_transaction(const struct device *dev, int error)
{
	struct spi_nrfx_data *dev_data = dev->data;
	struct spi_context *ctx = &dev_data->ctx;

	LOG_DBG("Transaction finished with status %d", error);

	spi_context_complete(ctx, dev, error);
	dev_data->busy = false;

	finalize_spi_transaction(dev, true);
}

static void transfer_next_chunk(const struct device *dev)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	struct spi_context *ctx = &dev_data->ctx;
	int error = 0;

	size_t chunk_len = spi_context_max_continuous_chunk(ctx);

	if (chunk_len > 0) {
		nrfx_spim_xfer_desc_t xfer;
		nrfx_err_t result;
		const uint8_t *tx_buf = ctx->tx_buf;
		uint8_t *rx_buf = ctx->rx_buf;

		if (chunk_len > dev_config->max_chunk_len) {
			chunk_len = dev_config->max_chunk_len;
		}

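		/* If a buffer is not accessible by the SPIM EasyDMA, bounce
		 * the transfer through the driver's RAM buffer, limiting the
		 * chunk length to the size of that buffer.
		 */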
#ifdef SPI_BUFFER_IN_RAM
		if (spi_context_tx_buf_on(ctx) &&
		    !nrf_dma_accessible_check(&dev_config->spim.p_reg, tx_buf)) {

			if (chunk_len > CONFIG_SPI_NRFX_RAM_BUFFER_SIZE) {
				chunk_len = CONFIG_SPI_NRFX_RAM_BUFFER_SIZE;
			}

			memcpy(dev_data->tx_buffer, tx_buf, chunk_len);
#ifdef CONFIG_DCACHE
			if (dev_config->mem_attr & DT_MEM_CACHEABLE) {
				sys_cache_data_flush_range(dev_data->tx_buffer, chunk_len);
			}
#endif
			tx_buf = dev_data->tx_buffer;
		}

		if (spi_context_rx_buf_on(ctx) &&
		    !nrf_dma_accessible_check(&dev_config->spim.p_reg, rx_buf)) {

			if (chunk_len > CONFIG_SPI_NRFX_RAM_BUFFER_SIZE) {
				chunk_len = CONFIG_SPI_NRFX_RAM_BUFFER_SIZE;
			}

			rx_buf = dev_data->rx_buffer;
		}
#endif

		dev_data->chunk_len = chunk_len;

		xfer.p_tx_buffer = tx_buf;
		xfer.tx_length   = spi_context_tx_buf_on(ctx) ? chunk_len : 0;
		xfer.p_rx_buffer = rx_buf;
		xfer.rx_length   = spi_context_rx_buf_on(ctx) ? chunk_len : 0;

#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
		if (xfer.rx_length == 1 && xfer.tx_length <= 1) {
			if (dev_config->anomaly_58_workaround) {
				anomaly_58_workaround_setup(dev);
			} else {
				LOG_WRN("Transaction aborted since it would trigger "
					"nRF52832 PAN 58");
				error = -EIO;
			}
		}
#endif
		if (error == 0) {
			result = nrfx_spim_xfer(&dev_config->spim, &xfer, 0);
			if (result == NRFX_SUCCESS) {
				return;
			}
			error = -EIO;
#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
			anomaly_58_workaround_clear(dev_data);
#endif
		}
	}

	finish_transaction(dev, error);
}

static void event_handler(const nrfx_spim_evt_t *p_event, void *p_context)
{
	const struct device *dev = p_context;
	struct spi_nrfx_data *dev_data = dev->data;
#ifdef CONFIG_DCACHE
	const struct spi_nrfx_config *dev_config = dev->config;
#endif

	if (p_event->type == NRFX_SPIM_EVENT_DONE) {
		/* Chunk length is set to 0 when a transaction is aborted
		 * due to a timeout.
		 */
		if (dev_data->chunk_len == 0) {
			finish_transaction(dev_data->dev, -ETIMEDOUT);
			return;
		}

#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
		anomaly_58_workaround_clear(dev_data);
#endif
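		/* If the received data was bounced through the driver's RAM
		 * buffer, copy it back to the user buffer, invalidating the
		 * data cache for that range first if it is cacheable.
		 */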
#ifdef SPI_BUFFER_IN_RAM
		if (spi_context_rx_buf_on(&dev_data->ctx) &&
		    p_event->xfer_desc.p_rx_buffer != NULL &&
		    p_event->xfer_desc.p_rx_buffer != dev_data->ctx.rx_buf) {
#ifdef CONFIG_DCACHE
			if (dev_config->mem_attr & DT_MEM_CACHEABLE) {
				sys_cache_data_invd_range(dev_data->rx_buffer, dev_data->chunk_len);
			}
#endif
			(void)memcpy(dev_data->ctx.rx_buf,
				     dev_data->rx_buffer,
				     dev_data->chunk_len);
		}
#endif
		spi_context_update_tx(&dev_data->ctx, 1, dev_data->chunk_len);
		spi_context_update_rx(&dev_data->ctx, 1, dev_data->chunk_len);

		transfer_next_chunk(dev_data->dev);
	}
}

static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	void *reg = dev_config->spim.p_reg;
	int error;

	spi_context_lock(&dev_data->ctx, asynchronous, cb, userdata, spi_cfg);

	error = configure(dev, spi_cfg);
	if (error == 0) {
		dev_data->busy = true;

		if (dev_config->wake_pin != WAKE_PIN_NOT_USED) {
			error = spi_nrfx_wake_request(&dev_config->wake_gpiote,
						      dev_config->wake_pin);
			if (error == -ETIMEDOUT) {
				LOG_WRN("Waiting for WAKE acknowledgment timed out");
				/* If a timeout occurs, try to perform the transfer
				 * anyway, just in case the slave device was unable
				 * to signal that it was already awake and prepared
				 * for the transfer.
				 */
			}
		}

		spi_context_buffers_setup(&dev_data->ctx, tx_bufs, rx_bufs, 1);
		if (NRF_SPIM_IS_320MHZ_SPIM(reg)) {
			nrfy_spim_enable(reg);
		}
		spi_context_cs_control(&dev_data->ctx, true);

		transfer_next_chunk(dev);

		error = spi_context_wait_for_completion(&dev_data->ctx);
		if (error == -ETIMEDOUT) {
			/* Set the chunk length to 0 so that event_handler()
			 * knows that the transaction timed out and is to be
			 * aborted.
			 */
			dev_data->chunk_len = 0;
			/* Abort the current transfer by deinitializing
			 * the nrfx driver.
			 */
			nrfx_spim_uninit(&dev_config->spim);
			dev_data->initialized = false;

			/* Make sure the transaction is finished (it may be
			 * already finished if it actually did complete before
			 * the nrfx driver was deinitialized).
			 */
			finish_transaction(dev, -ETIMEDOUT);

			/* Clean up the driver state. */
			k_sem_reset(&dev_data->ctx.sync);
#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
			anomaly_58_workaround_clear(dev_data);
#endif
		} else if (error) {
			finalize_spi_transaction(dev, true);
		}
	}

	spi_context_release(&dev_data->ctx, error);

	return error;
}

static int spi_nrfx_transceive(const struct device *dev,
			       const struct spi_config *spi_cfg,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_nrfx_transceive_async(const struct device *dev,
				     const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_nrfx_release(const struct device *dev,
			    const struct spi_config *spi_cfg)
{
	struct spi_nrfx_data *dev_data = dev->data;

	if (!spi_context_configured(&dev_data->ctx, spi_cfg)) {
		return -EINVAL;
	}

	if (dev_data->busy) {
		return -EBUSY;
	}

	spi_context_unlock_unconditionally(&dev_data->ctx);
	finalize_spi_transaction(dev, false);

	return 0;
}

static const struct spi_driver_api spi_nrfx_driver_api = {
	.transceive = spi_nrfx_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_nrfx_transceive_async,
#endif
	.release = spi_nrfx_release,
};

#ifdef CONFIG_PM_DEVICE
static int spim_nrfx_pm_action(const struct device *dev,
			       enum pm_device_action action)
{
	int ret = 0;
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		ret = pinctrl_apply_state(dev_config->pcfg,
					  PINCTRL_STATE_DEFAULT);
		if (ret < 0) {
			return ret;
		}
		/* nrfx_spim_init() will be called at configuration before
		 * the next transfer.
		 */
		break;

	case PM_DEVICE_ACTION_SUSPEND:
		if (dev_data->initialized) {
			nrfx_spim_uninit(&dev_config->spim);
			dev_data->initialized = false;
		}

		ret = pinctrl_apply_state(dev_config->pcfg,
					  PINCTRL_STATE_SLEEP);
		if (ret < 0) {
			return ret;
		}
		break;

	default:
		ret = -ENOTSUP;
	}

	return ret;
}
#endif /* CONFIG_PM_DEVICE */


static int spi_nrfx_init(const struct device *dev)
{
	const struct spi_nrfx_config *dev_config = dev->config;
	struct spi_nrfx_data *dev_data = dev->data;
	int err;

	err = pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		return err;
	}

	if (dev_config->wake_pin != WAKE_PIN_NOT_USED) {
		err = spi_nrfx_wake_init(&dev_config->wake_gpiote, dev_config->wake_pin);
		if (err == -ENODEV) {
			LOG_ERR("Failed to allocate GPIOTE channel for WAKE");
			return err;
		}
		if (err == -EIO) {
			LOG_ERR("Failed to configure WAKE pin");
			return err;
		}
	}

	dev_config->irq_connect();

	err = spi_context_cs_configure_all(&dev_data->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&dev_data->ctx);

#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
	return anomaly_58_workaround_init(dev);
#else
	return 0;
#endif
}
/*
 * We use NODELABEL here because the nrfx API requires us to call
 * functions which are named according to SoC peripheral instance
 * being operated on. Since DT_INST() makes no guarantees about that,
 * it won't work.
 */
#define SPIM(idx)			DT_NODELABEL(spi##idx)
#define SPIM_PROP(idx, prop)		DT_PROP(SPIM(idx), prop)
#define SPIM_HAS_PROP(idx, prop)	DT_NODE_HAS_PROP(SPIM(idx), prop)
#define SPIM_MEM_REGION(idx)		DT_PHANDLE(SPIM(idx), memory_regions)

#define SPI_NRFX_SPIM_EXTENDED_CONFIG(idx)				\
	IF_ENABLED(NRFX_SPIM_EXTENDED_ENABLED,				\
		(.dcx_pin = NRF_SPIM_PIN_NOT_CONNECTED,			\
		 COND_CODE_1(SPIM_PROP(idx, rx_delay_supported),	\
			     (.rx_delay = SPIM_PROP(idx, rx_delay),),	\
			     ())					\
		))

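/* Get the zephyr,memory-attr value of the memory region associated with a
 * given SPIM instance, or 0 if no such region is specified. Used to decide
 * whether data cache maintenance is needed for the auxiliary RAM buffers.
 */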
#define SPIM_GET_MEM_ATTR(idx)								 \
	COND_CODE_1(SPIM_HAS_PROP(idx, memory_regions),					 \
		(COND_CODE_1(DT_NODE_HAS_PROP(SPIM_MEM_REGION(idx), zephyr_memory_attr), \
			(DT_PROP(SPIM_MEM_REGION(idx), zephyr_memory_attr)),		 \
			(0))),								 \
		(0))

#define SPI_NRFX_SPIM_DEFINE(idx)					       \
	NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(SPIM(idx));			       \
	static void irq_connect##idx(void)				       \
	{								       \
		IRQ_CONNECT(DT_IRQN(SPIM(idx)), DT_IRQ(SPIM(idx), priority),   \
			    nrfx_isr, nrfx_spim_##idx##_irq_handler, 0);       \
	}								       \
	IF_ENABLED(SPI_BUFFER_IN_RAM,					       \
		(static uint8_t spim_##idx##_tx_buffer			       \
			[CONFIG_SPI_NRFX_RAM_BUFFER_SIZE]		       \
			SPIM_MEMORY_SECTION(idx);			       \
		 static uint8_t spim_##idx##_rx_buffer			       \
			[CONFIG_SPI_NRFX_RAM_BUFFER_SIZE]		       \
			SPIM_MEMORY_SECTION(idx);))			       \
	static struct spi_nrfx_data spi_##idx##_data = {		       \
		SPI_CONTEXT_INIT_LOCK(spi_##idx##_data, ctx),		       \
		SPI_CONTEXT_INIT_SYNC(spi_##idx##_data, ctx),		       \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(SPIM(idx), ctx)		       \
		IF_ENABLED(SPI_BUFFER_IN_RAM,				       \
			(.tx_buffer = spim_##idx##_tx_buffer,		       \
			 .rx_buffer = spim_##idx##_rx_buffer,))		       \
		.dev  = DEVICE_DT_GET(SPIM(idx)),			       \
		.busy = false,						       \
	};								       \
	PINCTRL_DT_DEFINE(SPIM(idx));					       \
	static const struct spi_nrfx_config spi_##idx##z_config = {	       \
		.spim = {						       \
			.p_reg = (NRF_SPIM_Type *)DT_REG_ADDR(SPIM(idx)),      \
			.drv_inst_idx = NRFX_SPIM##idx##_INST_IDX,	       \
		},							       \
		.max_freq = SPIM_PROP(idx, max_frequency),		       \
		.def_config = {						       \
			.skip_gpio_cfg = true,				       \
			.skip_psel_cfg = true,				       \
			.ss_pin = NRF_SPIM_PIN_NOT_CONNECTED,		       \
			.orc    = SPIM_PROP(idx, overrun_character),	       \
			SPI_NRFX_SPIM_EXTENDED_CONFIG(idx)		       \
		},							       \
		.irq_connect = irq_connect##idx,			       \
		.pcfg = PINCTRL_DT_DEV_CONFIG_GET(SPIM(idx)),		       \
		.max_chunk_len = BIT_MASK(SPIM_PROP(idx, easydma_maxcnt_bits)),\
		COND_CODE_1(CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58,     \
			(.anomaly_58_workaround =			       \
				SPIM_PROP(idx, anomaly_58_workaround),),       \
			())						       \
		.wake_pin = NRF_DT_GPIOS_TO_PSEL_OR(SPIM(idx), wake_gpios,     \
						    WAKE_PIN_NOT_USED),	       \
		.wake_gpiote = WAKE_GPIOTE_INSTANCE(SPIM(idx)),		       \
		IF_ENABLED(CONFIG_DCACHE,				       \
			(.mem_attr = SPIM_GET_MEM_ATTR(idx),))		       \
	};								       \
	BUILD_ASSERT(!SPIM_HAS_PROP(idx, wake_gpios) ||			       \
		     !(DT_GPIO_FLAGS(SPIM(idx), wake_gpios) & GPIO_ACTIVE_LOW),\
		     "WAKE line must be configured as active high");	       \
	PM_DEVICE_DT_DEFINE(SPIM(idx), spim_nrfx_pm_action);		       \
	DEVICE_DT_DEFINE(SPIM(idx),					       \
		      spi_nrfx_init,					       \
		      PM_DEVICE_DT_GET(SPIM(idx)),			       \
		      &spi_##idx##_data,				       \
		      &spi_##idx##z_config,				       \
		      POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,		       \
		      &spi_nrfx_driver_api)

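/* Place the auxiliary RAM buffers in the memory region specified by the
 * memory-regions devicetree property, if one is provided.
 */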
#define SPIM_MEMORY_SECTION(idx)					       \
	COND_CODE_1(SPIM_HAS_PROP(idx, memory_regions),			       \
		(__attribute__((__section__(LINKER_DT_NODE_REGION_NAME(	       \
			SPIM_MEM_REGION(idx)))))),			       \
		())

#ifdef CONFIG_HAS_HW_NRF_SPIM0
SPI_NRFX_SPIM_DEFINE(0);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM1
SPI_NRFX_SPIM_DEFINE(1);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM2
SPI_NRFX_SPIM_DEFINE(2);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM3
SPI_NRFX_SPIM_DEFINE(3);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM4
SPI_NRFX_SPIM_DEFINE(4);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM00
SPI_NRFX_SPIM_DEFINE(00);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM20
SPI_NRFX_SPIM_DEFINE(20);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM21
SPI_NRFX_SPIM_DEFINE(21);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM22
SPI_NRFX_SPIM_DEFINE(22);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM30
SPI_NRFX_SPIM_DEFINE(30);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM120
SPI_NRFX_SPIM_DEFINE(120);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM121
SPI_NRFX_SPIM_DEFINE(121);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM130
SPI_NRFX_SPIM_DEFINE(130);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM131
SPI_NRFX_SPIM_DEFINE(131);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM132
SPI_NRFX_SPIM_DEFINE(132);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM133
SPI_NRFX_SPIM_DEFINE(133);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM134
SPI_NRFX_SPIM_DEFINE(134);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM135
SPI_NRFX_SPIM_DEFINE(135);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM136
SPI_NRFX_SPIM_DEFINE(136);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM137
SPI_NRFX_SPIM_DEFINE(137);
#endif