1 /*
2  * Copyright (c) 2017 - 2018, Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/drivers/spi.h>
8 #include <zephyr/drivers/spi/rtio.h>
9 #include <zephyr/cache.h>
10 #include <zephyr/pm/device.h>
11 #include <zephyr/pm/device_runtime.h>
12 #include <zephyr/drivers/clock_control/nrf_clock_control.h>
13 #include <zephyr/drivers/pinctrl.h>
14 #include <zephyr/mem_mgmt/mem_attr.h>
15 #include <soc.h>
16 #ifdef CONFIG_SOC_NRF54H20_GPD
17 #include <nrf/gpd.h>
18 #endif
19 #ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
20 #include <nrfx_ppi.h>
21 #endif
22 #ifdef CONFIG_SOC_NRF5340_CPUAPP
23 #include <hal/nrf_clock.h>
24 #endif
25 #include <nrfx_spim.h>
26 #include <string.h>
27 #include <zephyr/linker/devicetree_regions.h>
28 
29 #include <zephyr/logging/log.h>
30 #include <zephyr/irq.h>
31 LOG_MODULE_REGISTER(spi_nrfx_spim, CONFIG_SPI_LOG_LEVEL);
32 
33 #include "spi_context.h"
34 #include "spi_nrfx_common.h"
35 
36 #if defined(CONFIG_SOC_NRF52832) && !defined(CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58)
37 #error  This driver is not available by default for nRF52832 because of Product Anomaly 58 \
38 	(SPIM: An additional byte is clocked out when RXD.MAXCNT == 1 and TXD.MAXCNT <= 1). \
39 	Use CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58=y to override this limitation.
40 #endif
41 
/* A nonzero CONFIG_SPI_NRFX_RAM_BUFFER_SIZE enables bounce buffers in RAM
 * for transfers whose user buffers are not accessible by EasyDMA
 * (see transfer_next_chunk()).
 */
#if (CONFIG_SPI_NRFX_RAM_BUFFER_SIZE > 0)
#define SPI_BUFFER_IN_RAM 1
#endif

/* Fast SPIM instances (120/121) request the global HSFLL clock around
 * transfers when that clock controller is available.
 */
#if defined(CONFIG_CLOCK_CONTROL_NRF2_GLOBAL_HSFLL) && \
	(defined(CONFIG_HAS_HW_NRF_SPIM120) || \
	 defined(CONFIG_HAS_HW_NRF_SPIM121))
#define SPIM_REQUESTS_CLOCK(idx) UTIL_OR(IS_EQ(idx, 120), \
					 IS_EQ(idx, 121))
#define USE_CLOCK_REQUESTS 1
#else
#define SPIM_REQUESTS_CLOCK(idx) 0
#endif
55 
/* Per-instance run-time state. */
struct spi_nrfx_data {
	struct spi_context ctx;        /* generic SPI context (lock, buffers, CS) */
	const struct device *dev;      /* back-pointer to the device instance */
	size_t  chunk_len;             /* length of chunk in flight; 0 marks a timed-out transfer */
	bool    busy;                  /* a transaction is in progress */
	bool    initialized;           /* nrfx driver initialized for the current config */
#ifdef SPI_BUFFER_IN_RAM
	uint8_t *tx_buffer;            /* DMA-accessible TX bounce buffer */
	uint8_t *rx_buffer;            /* DMA-accessible RX bounce buffer */
#endif
#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
	bool    anomaly_58_workaround_active; /* PAN 58 PPI/GPIOTE path armed */
	uint8_t ppi_ch;                /* allocated PPI channel for the workaround */
	uint8_t gpiote_ch;             /* allocated GPIOTE channel for the workaround */
#endif
#ifdef USE_CLOCK_REQUESTS
	bool clock_requested;          /* HSFLL clock currently requested */
#endif
};
75 
/* Per-instance constant configuration (built at compile time). */
struct spi_nrfx_config {
	nrfx_spim_t	   spim;       /* nrfx driver instance */
	uint32_t	   max_freq;   /* max frequency from devicetree */
	nrfx_spim_config_t def_config; /* template config; freq/mode filled in configure() */
	void (*irq_connect)(void);     /* connects the instance IRQ */
	uint16_t max_chunk_len;        /* EasyDMA MAXCNT limit for one transfer */
	const struct pinctrl_dev_config *pcfg;
#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
	bool anomaly_58_workaround;    /* PAN 58 workaround enabled in devicetree */
#endif
	uint32_t wake_pin;             /* WAKE line PSEL or WAKE_PIN_NOT_USED */
	nrfx_gpiote_t wake_gpiote;     /* GPIOTE instance used for the WAKE line */
#ifdef CONFIG_DCACHE
	uint32_t mem_attr;             /* memory attributes of the buffer region */
#endif
#ifdef USE_CLOCK_REQUESTS
	const struct device *clk_dev;  /* HSFLL clock controller or NULL */
	struct nrf_clock_spec clk_spec;
#endif
};
96 
97 static void event_handler(const nrfx_spim_evt_t *p_event, void *p_context);
98 
/* Request the clock this instance depends on (fast instances only).
 * No-op when clock requests are compiled out or no clock device is set.
 *
 * @return 0 on success, negative error code otherwise.
 */
static inline int request_clock(const struct device *dev)
{
#ifdef USE_CLOCK_REQUESTS
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	int error;

	if (!dev_config->clk_dev) {
		return 0;
	}

	/* Block until the clock is ready, bounded by the completion
	 * timeout tolerance.
	 */
	error = nrf_clock_control_request_sync(
			dev_config->clk_dev, &dev_config->clk_spec,
			K_MSEC(CONFIG_SPI_COMPLETION_TIMEOUT_TOLERANCE));
	if (error < 0) {
		LOG_ERR("Failed to request clock: %d", error);
		return error;
	}

	/* Track the request so release_clock() releases only what was taken. */
	dev_data->clock_requested = true;
#else
	ARG_UNUSED(dev);
#endif

	return 0;
}
125 
/* Release the clock previously obtained by request_clock().
 * Safe to call when no request is outstanding (then it does nothing).
 */
static inline void release_clock(const struct device *dev)
{
#ifdef USE_CLOCK_REQUESTS
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;

	if (!dev_data->clock_requested) {
		return;
	}

	dev_data->clock_requested = false;

	nrf_clock_control_release(dev_config->clk_dev, &dev_config->clk_spec);
#else
	ARG_UNUSED(dev);
#endif
}
143 
/* Common end-of-transaction handling: optionally deassert CS, disable
 * fast (320 MHz) SPIM instances unless SPI_HOLD_ON_CS keeps the bus
 * active, release the clock when PM runtime is not managing it, and
 * schedule an asynchronous PM put.
 */
static inline void finalize_spi_transaction(const struct device *dev, bool deactivate_cs)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	void *reg = dev_config->spim.p_reg;

	if (deactivate_cs) {
		spi_context_cs_control(&dev_data->ctx, false);
	}

	/* Fast instances are enabled per transfer (see transceive()). */
	if (NRF_SPIM_IS_320MHZ_SPIM(reg) && !(dev_data->ctx.config->operation & SPI_HOLD_ON_CS)) {
		nrfy_spim_disable(reg);
	}

	if (!IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
		release_clock(dev);
	}

	pm_device_runtime_put_async(dev, K_NO_WAIT);
}
164 
/* Map a requested SCK frequency to the highest frequency the SPIM
 * peripheral supports that does not exceed it; 125 kHz is the floor.
 */
static inline uint32_t get_nrf_spim_frequency(uint32_t frequency)
{
	static const uint32_t standard_freqs[] = {
		MHZ(8), MHZ(4), MHZ(2), MHZ(1), KHZ(500), KHZ(250),
	};

	/* 32/16 MHz are only available on some instances. */
	if (frequency >= MHZ(32) && (NRF_SPIM_HAS_32_MHZ_FREQ || NRF_SPIM_HAS_PRESCALER)) {
		return MHZ(32);
	}
	if (frequency >= MHZ(16) && (NRF_SPIM_HAS_16_MHZ_FREQ || NRF_SPIM_HAS_PRESCALER)) {
		return MHZ(16);
	}

	for (size_t i = 0; i < sizeof(standard_freqs) / sizeof(standard_freqs[0]); ++i) {
		if (frequency >= standard_freqs[i]) {
			return standard_freqs[i];
		}
	}

	return KHZ(125);
}
189 
get_nrf_spim_mode(uint16_t operation)190 static inline nrf_spim_mode_t get_nrf_spim_mode(uint16_t operation)
191 {
192 	if (SPI_MODE_GET(operation) & SPI_MODE_CPOL) {
193 		if (SPI_MODE_GET(operation) & SPI_MODE_CPHA) {
194 			return NRF_SPIM_MODE_3;
195 		} else {
196 			return NRF_SPIM_MODE_2;
197 		}
198 	} else {
199 		if (SPI_MODE_GET(operation) & SPI_MODE_CPHA) {
200 			return NRF_SPIM_MODE_1;
201 		} else {
202 			return NRF_SPIM_MODE_0;
203 		}
204 	}
205 }
206 
get_nrf_spim_bit_order(uint16_t operation)207 static inline nrf_spim_bit_order_t get_nrf_spim_bit_order(uint16_t operation)
208 {
209 	if (operation & SPI_TRANSFER_LSB) {
210 		return NRF_SPIM_BIT_ORDER_LSB_FIRST;
211 	} else {
212 		return NRF_SPIM_BIT_ORDER_MSB_FIRST;
213 	}
214 }
215 
/* Validate @spi_cfg and (re)initialize the nrfx SPIM driver for it.
 * Skipped entirely if the current configuration already matches.
 *
 * @return 0 on success; -ENOTSUP / -EINVAL for unsupported settings;
 *         -EIO if the nrfx driver fails to initialize.
 */
static int configure(const struct device *dev,
		     const struct spi_config *spi_cfg)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	struct spi_context *ctx = &dev_data->ctx;
	uint32_t max_freq = dev_config->max_freq;
	nrfx_spim_config_t config;
	nrfx_err_t result;

	if (dev_data->initialized && spi_context_configured(ctx, spi_cfg)) {
		/* Already configured. No need to do it again. */
		return 0;
	}

	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	if (SPI_OP_MODE_GET(spi_cfg->operation) != SPI_OP_MODE_MASTER) {
		LOG_ERR("Slave mode is not supported on %s", dev->name);
		return -EINVAL;
	}

	if (spi_cfg->operation & SPI_MODE_LOOP) {
		LOG_ERR("Loopback mode is not supported");
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) &&
	    (spi_cfg->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
		LOG_ERR("Only single line mode is supported");
		return -EINVAL;
	}

	if (SPI_WORD_SIZE_GET(spi_cfg->operation) != 8) {
		LOG_ERR("Word sizes other than 8 bits are not supported");
		return -EINVAL;
	}

	/* 125 kHz is the lowest frequency get_nrf_spim_frequency() can map. */
	if (spi_cfg->frequency < 125000) {
		LOG_ERR("Frequencies lower than 125 kHz are not supported");
		return -EINVAL;
	}

#if defined(CONFIG_SOC_NRF5340_CPUAPP)
	/* On nRF5340, the 32 Mbps speed is supported by the application core
	 * when it is running at 128 MHz (see the Timing specifications section
	 * in the nRF5340 PS).
	 */
	if (max_freq > 16000000 &&
	    nrf_clock_hfclk_div_get(NRF_CLOCK) != NRF_CLOCK_HFCLK_DIV_1) {
		max_freq = 16000000;
	}
#endif

	config = dev_config->def_config;

	/* Limit the frequency to that supported by the SPIM instance. */
	config.frequency = get_nrf_spim_frequency(MIN(spi_cfg->frequency,
						      max_freq));
	config.mode      = get_nrf_spim_mode(spi_cfg->operation);
	config.bit_order = get_nrf_spim_bit_order(spi_cfg->operation);

	/* Pre-set SCK to its idle level for the selected polarity. */
	nrfy_gpio_pin_write(nrfy_spim_sck_pin_get(dev_config->spim.p_reg),
			    spi_cfg->operation & SPI_MODE_CPOL ? 1 : 0);

	/* Reconfiguring requires a full uninit/init cycle of the nrfx driver. */
	if (dev_data->initialized) {
		nrfx_spim_uninit(&dev_config->spim);
		dev_data->initialized = false;
	}

	result = nrfx_spim_init(&dev_config->spim, &config,
				event_handler, (void *)dev);
	if (result != NRFX_SUCCESS) {
		LOG_ERR("Failed to initialize nrfx driver: %08x", result);
		return -EIO;
	}

	dev_data->initialized = true;

	ctx->config = spi_cfg;

	return 0;
}
302 
303 #ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
/* GPIOTE instance used by the PAN 58 workaround to sense SCK activity. */
static const nrfx_gpiote_t gpiote = NRFX_GPIOTE_INSTANCE(0);
305 
306 /*
307  * Brief Workaround for transmitting 1 byte with SPIM.
308  *
309  * Derived from the setup_workaround_for_ftpan_58() function from
310  * the nRF52832 Rev 1 Errata v1.6 document anomaly 58 workaround.
311  *
312  * Warning Must not be used when transmitting multiple bytes.
313  *
314  * Warning After this workaround is used, the user must reset the PPI
315  * channel and the GPIOTE channel before attempting to transmit multiple
316  * bytes.
317  */
/* Arm the PAN 58 workaround for a single-byte transfer: a GPIOTE event
 * on the first SCK toggle triggers, via PPI, the SPIM STOP task so only
 * one byte is clocked. See the comment block above for constraints.
 */
static void anomaly_58_workaround_setup(const struct device *dev)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	NRF_SPIM_Type *spim = dev_config->spim.p_reg;
	uint32_t ppi_ch = dev_data->ppi_ch;
	uint32_t gpiote_ch = dev_data->gpiote_ch;
	/* PPI endpoints: GPIOTE IN event -> SPIM STOP task. */
	uint32_t eep = (uint32_t)&gpiote.p_reg->EVENTS_IN[gpiote_ch];
	uint32_t tep = (uint32_t)&spim->TASKS_STOP;

	dev_data->anomaly_58_workaround_active = true;

	/* Create an event when SCK toggles */
	nrf_gpiote_event_configure(gpiote.p_reg, gpiote_ch, spim->PSEL.SCK,
				   GPIOTE_CONFIG_POLARITY_Toggle);
	nrf_gpiote_event_enable(gpiote.p_reg, gpiote_ch);

	/* Stop the spim instance when SCK toggles */
	nrf_ppi_channel_endpoint_setup(NRF_PPI, ppi_ch, eep, tep);
	nrf_ppi_channel_enable(NRF_PPI, ppi_ch);

	/* The spim instance cannot be stopped mid-byte, so it will finish
	 * transmitting the first byte and then stop. Effectively ensuring
	 * that only 1 byte is transmitted.
	 */
}
344 
anomaly_58_workaround_clear(struct spi_nrfx_data * dev_data)345 static void anomaly_58_workaround_clear(struct spi_nrfx_data *dev_data)
346 {
347 	uint32_t ppi_ch = dev_data->ppi_ch;
348 	uint32_t gpiote_ch = dev_data->gpiote_ch;
349 
350 	if (dev_data->anomaly_58_workaround_active) {
351 		nrf_ppi_channel_disable(NRF_PPI, ppi_ch);
352 		nrf_gpiote_task_disable(gpiote.p_reg, gpiote_ch);
353 
354 		dev_data->anomaly_58_workaround_active = false;
355 	}
356 }
357 
anomaly_58_workaround_init(const struct device * dev)358 static int anomaly_58_workaround_init(const struct device *dev)
359 {
360 	struct spi_nrfx_data *dev_data = dev->data;
361 	const struct spi_nrfx_config *dev_config = dev->config;
362 	nrfx_err_t err_code;
363 
364 	dev_data->anomaly_58_workaround_active = false;
365 
366 	if (dev_config->anomaly_58_workaround) {
367 		err_code = nrfx_ppi_channel_alloc(&dev_data->ppi_ch);
368 		if (err_code != NRFX_SUCCESS) {
369 			LOG_ERR("Failed to allocate PPI channel");
370 			return -ENODEV;
371 		}
372 
373 		err_code = nrfx_gpiote_channel_alloc(&gpiote, &dev_data->gpiote_ch);
374 		if (err_code != NRFX_SUCCESS) {
375 			LOG_ERR("Failed to allocate GPIOTE channel");
376 			return -ENODEV;
377 		}
378 		LOG_DBG("PAN 58 workaround enabled for %s: ppi %u, gpiote %u",
379 			dev->name, dev_data->ppi_ch, dev_data->gpiote_ch);
380 	}
381 
382 	return 0;
383 }
384 #endif
385 
/* Complete the current transaction with @error, clear the busy flag and
 * perform the common finalization (CS deassert, clock release, PM put).
 */
static void finish_transaction(const struct device *dev, int error)
{
	struct spi_nrfx_data *dev_data = dev->data;
	struct spi_context *ctx = &dev_data->ctx;

	LOG_DBG("Transaction finished with status %d", error);

	/* Wakes the waiter in transceive() or invokes the async callback. */
	spi_context_complete(ctx, dev, error);
	dev_data->busy = false;

	finalize_spi_transaction(dev, true);
}
398 
/* Start the next contiguous chunk of the current spi_context transaction.
 * The chunk length is capped by the instance's EasyDMA MAXCNT width and,
 * when bounce buffers are needed, by CONFIG_SPI_NRFX_RAM_BUFFER_SIZE.
 * If nothing remains or starting the transfer fails, the transaction is
 * finished via finish_transaction().
 */
static void transfer_next_chunk(const struct device *dev)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	struct spi_context *ctx = &dev_data->ctx;
	int error = 0;

	size_t chunk_len = spi_context_max_continuous_chunk(ctx);

	if (chunk_len > 0) {
		nrfx_spim_xfer_desc_t xfer;
		nrfx_err_t result;
		const uint8_t *tx_buf = ctx->tx_buf;
		uint8_t *rx_buf = ctx->rx_buf;

		if (chunk_len > dev_config->max_chunk_len) {
			chunk_len = dev_config->max_chunk_len;
		}

#ifdef SPI_BUFFER_IN_RAM
		/* Copy TX data into the DMA-accessible bounce buffer when
		 * EasyDMA cannot access the user buffer directly.
		 */
		if (spi_context_tx_buf_on(ctx) &&
		    !nrf_dma_accessible_check(&dev_config->spim.p_reg, tx_buf)) {

			if (chunk_len > CONFIG_SPI_NRFX_RAM_BUFFER_SIZE) {
				chunk_len = CONFIG_SPI_NRFX_RAM_BUFFER_SIZE;
			}

			memcpy(dev_data->tx_buffer, tx_buf, chunk_len);
#ifdef CONFIG_DCACHE
			if (dev_config->mem_attr & DT_MEM_CACHEABLE) {
				sys_cache_data_flush_range(dev_data->tx_buffer, chunk_len);
			}
#endif
			tx_buf = dev_data->tx_buffer;
		}

		/* Receive into the bounce buffer when needed; the data is
		 * copied back to the user buffer in event_handler().
		 */
		if (spi_context_rx_buf_on(ctx) &&
		    !nrf_dma_accessible_check(&dev_config->spim.p_reg, rx_buf)) {

			if (chunk_len > CONFIG_SPI_NRFX_RAM_BUFFER_SIZE) {
				chunk_len = CONFIG_SPI_NRFX_RAM_BUFFER_SIZE;
			}

			rx_buf = dev_data->rx_buffer;
		}
#endif

		dev_data->chunk_len = chunk_len;

		xfer.p_tx_buffer = tx_buf;
		xfer.tx_length   = spi_context_tx_buf_on(ctx) ? chunk_len : 0;
		xfer.p_rx_buffer = rx_buf;
		xfer.rx_length   = spi_context_rx_buf_on(ctx) ? chunk_len : 0;

#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
		/* PAN 58: RXD.MAXCNT == 1 with TXD.MAXCNT <= 1 clocks out an
		 * extra byte; apply the workaround or abort the transaction.
		 */
		if (xfer.rx_length == 1 && xfer.tx_length <= 1) {
			if (dev_config->anomaly_58_workaround) {
				anomaly_58_workaround_setup(dev);
			} else {
				LOG_WRN("Transaction aborted since it would trigger "
					"nRF52832 PAN 58");
				error = -EIO;
			}
		}
#endif
		if (error == 0) {
			result = nrfx_spim_xfer(&dev_config->spim, &xfer, 0);
			if (result == NRFX_SUCCESS) {
				return;
			}
			error = -EIO;
#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
			anomaly_58_workaround_clear(dev_data);
#endif
		}
	}

	finish_transaction(dev, error);
}
478 
/* nrfx SPIM event handler (interrupt context). On transfer completion it
 * copies bounce-buffer RX data back to the user buffer, advances the SPI
 * context and kicks off the next chunk or finishes the transaction.
 */
static void event_handler(const nrfx_spim_evt_t *p_event, void *p_context)
{
	const struct device *dev = p_context;
	struct spi_nrfx_data *dev_data = dev->data;
#ifdef CONFIG_DCACHE
	const struct spi_nrfx_config *dev_config = dev->config;
#endif

	if (p_event->type == NRFX_SPIM_EVENT_DONE) {
		/* Chunk length is set to 0 when a transaction is aborted
		 * due to a timeout.
		 */
		if (dev_data->chunk_len == 0) {
			finish_transaction(dev_data->dev, -ETIMEDOUT);
			return;
		}

#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
		anomaly_58_workaround_clear(dev_data);
#endif
#ifdef SPI_BUFFER_IN_RAM
		/* If the bounce buffer was used for RX (the descriptor's RX
		 * pointer differs from the user buffer), copy the data back.
		 */
		if (spi_context_rx_buf_on(&dev_data->ctx) &&
		    p_event->xfer_desc.p_rx_buffer != NULL &&
		    p_event->xfer_desc.p_rx_buffer != dev_data->ctx.rx_buf) {
#ifdef CONFIG_DCACHE
			if (dev_config->mem_attr & DT_MEM_CACHEABLE) {
				sys_cache_data_invd_range(dev_data->rx_buffer, dev_data->chunk_len);
			}
#endif
			(void)memcpy(dev_data->ctx.rx_buf,
				     dev_data->rx_buffer,
				     dev_data->chunk_len);
		}
#endif
		spi_context_update_tx(&dev_data->ctx, 1, dev_data->chunk_len);
		spi_context_update_rx(&dev_data->ctx, 1, dev_data->chunk_len);

		transfer_next_chunk(dev_data->dev);
	}
}
519 
/* Core transfer routine shared by the sync and async API entry points.
 * Takes a PM reference and the context lock, configures the peripheral,
 * optionally signals the WAKE line, then runs the chunked transfer and
 * waits for (or arranges callback on) completion. On timeout the nrfx
 * driver is deinitialized to abort the transfer and the state cleaned up.
 *
 * @return 0 on success or a negative error code.
 */
static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	void *reg = dev_config->spim.p_reg;
	int error;

	pm_device_runtime_get(dev);
	spi_context_lock(&dev_data->ctx, asynchronous, cb, userdata, spi_cfg);

	error = configure(dev, spi_cfg);

	/* Without PM runtime, the clock is requested per transaction here
	 * and released in finalize_spi_transaction().
	 */
	if (error == 0 && !IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
		error = request_clock(dev);
	}

	if (error == 0) {
		dev_data->busy = true;

		if (dev_config->wake_pin != WAKE_PIN_NOT_USED) {
			error = spi_nrfx_wake_request(&dev_config->wake_gpiote,
						      dev_config->wake_pin);
			if (error == -ETIMEDOUT) {
				LOG_WRN("Waiting for WAKE acknowledgment timed out");
				/* If timeout occurs, try to perform the transfer
				 * anyway, just in case the slave device was unable
				 * to signal that it was already awaken and prepared
				 * for the transfer.
				 */
			}
		}

		spi_context_buffers_setup(&dev_data->ctx, tx_bufs, rx_bufs, 1);
		/* Fast (320 MHz) instances are enabled only around transfers. */
		if (NRF_SPIM_IS_320MHZ_SPIM(reg)) {
			nrfy_spim_enable(reg);
		}
		spi_context_cs_control(&dev_data->ctx, true);

		transfer_next_chunk(dev);

		error = spi_context_wait_for_completion(&dev_data->ctx);
		if (error == -ETIMEDOUT) {
			/* Set the chunk length to 0 so that event_handler()
			 * knows that the transaction timed out and is to be
			 * aborted.
			 */
			dev_data->chunk_len = 0;
			/* Abort the current transfer by deinitializing
			 * the nrfx driver.
			 */
			nrfx_spim_uninit(&dev_config->spim);
			dev_data->initialized = false;

			/* Make sure the transaction is finished (it may be
			 * already finished if it actually did complete before
			 * the nrfx driver was deinitialized).
			 */
			finish_transaction(dev, -ETIMEDOUT);

			/* Clean up the driver state. */
			k_sem_reset(&dev_data->ctx.sync);
#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
			anomaly_58_workaround_clear(dev_data);
#endif
		} else if (error) {
			finalize_spi_transaction(dev, true);
		}
	} else {
		/* Setup failed: drop the PM reference taken above. */
		pm_device_runtime_put(dev);
	}

	spi_context_release(&dev_data->ctx, error);

	return error;
}
601 
/* spi_driver_api: blocking transceive. */
static int spi_nrfx_transceive(const struct device *dev,
			       const struct spi_config *spi_cfg,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}
609 
610 #ifdef CONFIG_SPI_ASYNC
/* spi_driver_api: asynchronous transceive with completion callback. */
static int spi_nrfx_transceive_async(const struct device *dev,
				     const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
620 #endif /* CONFIG_SPI_ASYNC */
621 
spi_nrfx_release(const struct device * dev,const struct spi_config * spi_cfg)622 static int spi_nrfx_release(const struct device *dev,
623 			    const struct spi_config *spi_cfg)
624 {
625 	struct spi_nrfx_data *dev_data = dev->data;
626 
627 	if (!spi_context_configured(&dev_data->ctx, spi_cfg)) {
628 		return -EINVAL;
629 	}
630 
631 	if (dev_data->busy) {
632 		return -EBUSY;
633 	}
634 
635 	spi_context_unlock_unconditionally(&dev_data->ctx);
636 	finalize_spi_transaction(dev, false);
637 
638 	return 0;
639 }
640 
/* SPI driver API vtable for this driver. */
static DEVICE_API(spi, spi_nrfx_driver_api) = {
	.transceive = spi_nrfx_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_nrfx_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = spi_nrfx_release,
};
651 
/* PM resume: restore the default pin state and, with PM runtime enabled,
 * request the instance's clock. The nrfx driver itself is re-initialized
 * lazily by configure() on the next transfer.
 */
static int spim_resume(const struct device *dev)
{
	const struct spi_nrfx_config *dev_config = dev->config;

	(void)pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_DEFAULT);
	/* nrfx_spim_init() will be called at configuration before
	 * the next transfer.
	 */

#ifdef CONFIG_SOC_NRF54H20_GPD
	nrf_gpd_retain_pins_set(dev_config->pcfg, false);
#endif

	return IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) ? request_clock(dev) : 0;
}
667 
/* PM suspend: deinitialize the nrfx driver if active, release the clock
 * when PM runtime manages it, and put the pins into their sleep state.
 */
static void spim_suspend(const struct device *dev)
{
	const struct spi_nrfx_config *dev_config = dev->config;
	struct spi_nrfx_data *dev_data = dev->data;

	if (dev_data->initialized) {
		nrfx_spim_uninit(&dev_config->spim);
		dev_data->initialized = false;
	}

	if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
		release_clock(dev);
	}

#ifdef CONFIG_SOC_NRF54H20_GPD
	nrf_gpd_retain_pins_set(dev_config->pcfg, true);
#endif

	(void)pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_SLEEP);
}
688 
spim_nrfx_pm_action(const struct device * dev,enum pm_device_action action)689 static int spim_nrfx_pm_action(const struct device *dev, enum pm_device_action action)
690 {
691 	if (action == PM_DEVICE_ACTION_RESUME) {
692 		return spim_resume(dev);
693 	} else if (IS_ENABLED(CONFIG_PM_DEVICE) && (action == PM_DEVICE_ACTION_SUSPEND)) {
694 		spim_suspend(dev);
695 	} else {
696 		return -ENOTSUP;
697 	}
698 
699 	return 0;
700 }
701 
/* Device init: apply default pinctrl state, set up the optional WAKE
 * line, connect the IRQ, configure CS GPIOs, allocate PAN 58 workaround
 * resources when enabled, and hand off to the PM device init helper.
 *
 * @return 0 on success or a negative error code.
 */
static int spi_nrfx_init(const struct device *dev)
{
	const struct spi_nrfx_config *dev_config = dev->config;
	struct spi_nrfx_data *dev_data = dev->data;
	int err;

	err = pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		return err;
	}

	if (dev_config->wake_pin != WAKE_PIN_NOT_USED) {
		err = spi_nrfx_wake_init(&dev_config->wake_gpiote, dev_config->wake_pin);
		if (err == -ENODEV) {
			LOG_ERR("Failed to allocate GPIOTE channel for WAKE");
			return err;
		}
		if (err == -EIO) {
			LOG_ERR("Failed to configure WAKE pin");
			return err;
		}
	}

	dev_config->irq_connect();

	err = spi_context_cs_configure_all(&dev_data->ctx);
	if (err < 0) {
		return err;
	}

	/* Make the context usable for the first transfer. */
	spi_context_unlock_unconditionally(&dev_data->ctx);

#ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
	err = anomaly_58_workaround_init(dev);
	if (err < 0) {
		return err;
	}
#endif
	return pm_device_driver_init(dev, spim_nrfx_pm_action);
}
742 /*
743  * We use NODELABEL here because the nrfx API requires us to call
744  * functions which are named according to SoC peripheral instance
745  * being operated on. Since DT_INST() makes no guarantees about that,
746  * it won't work.
747  */
/* Devicetree access helpers for a SPIM instance identified by its
 * peripheral index.
 */
#define SPIM(idx)			DT_NODELABEL(spi##idx)
#define SPIM_PROP(idx, prop)		DT_PROP(SPIM(idx), prop)
#define SPIM_HAS_PROP(idx, prop)	DT_NODE_HAS_PROP(SPIM(idx), prop)
#define SPIM_MEM_REGION(idx)		DT_PHANDLE(SPIM(idx), memory_regions)

/* Optional extended-config fields (DCX pin, RX delay) when the nrfx
 * driver is built with extended features.
 */
#define SPI_NRFX_SPIM_EXTENDED_CONFIG(idx)				\
	IF_ENABLED(NRFX_SPIM_EXTENDED_ENABLED,				\
		(.dcx_pin = NRF_SPIM_PIN_NOT_CONNECTED,			\
		 COND_CODE_1(SPIM_PROP(idx, rx_delay_supported),	\
			     (.rx_delay = SPIM_PROP(idx, rx_delay),),	\
			     ())					\
		))

/* Memory attributes of the instance's buffer memory region, or 0 when
 * no region (or no attributes) is specified.
 */
#define SPIM_GET_MEM_ATTR(idx)								 \
	COND_CODE_1(SPIM_HAS_PROP(idx, memory_regions),					 \
		(COND_CODE_1(DT_NODE_HAS_PROP(SPIM_MEM_REGION(idx), zephyr_memory_attr), \
			(DT_PROP(SPIM_MEM_REGION(idx), zephyr_memory_attr)),		 \
			(0))),								 \
		(0))

/* Fast instances depend on the global HSFLL clock controller (as they need
 * to request the highest frequency from it to operate correctly), so they
 * must be initialized after that controller driver, hence the default SPI
 * initialization priority may be too early for them.
 */
#if defined(CONFIG_CLOCK_CONTROL_NRF2_GLOBAL_HSFLL_INIT_PRIORITY) && \
	CONFIG_SPI_INIT_PRIORITY < CONFIG_CLOCK_CONTROL_NRF2_GLOBAL_HSFLL_INIT_PRIORITY
#define SPIM_INIT_PRIORITY(idx) \
	COND_CODE_1(SPIM_REQUESTS_CLOCK(idx), \
		(UTIL_INC(CONFIG_CLOCK_CONTROL_NRF2_GLOBAL_HSFLL_INIT_PRIORITY)), \
		(CONFIG_SPI_INIT_PRIORITY))
#else
#define SPIM_INIT_PRIORITY(idx) CONFIG_SPI_INIT_PRIORITY
#endif
782 
/* Instantiate one SPIM device: IRQ connect function, optional DMA bounce
 * buffers, run-time data, constant config, PM device and SPI device
 * definitions — all derived from the devicetree node spi<idx>.
 */
#define SPI_NRFX_SPIM_DEFINE(idx)					       \
	NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(SPIM(idx));			       \
	static void irq_connect##idx(void)				       \
	{								       \
		IRQ_CONNECT(DT_IRQN(SPIM(idx)), DT_IRQ(SPIM(idx), priority),   \
			    nrfx_isr, nrfx_spim_##idx##_irq_handler, 0);       \
	}								       \
	IF_ENABLED(SPI_BUFFER_IN_RAM,					       \
		(static uint8_t spim_##idx##_tx_buffer			       \
			[CONFIG_SPI_NRFX_RAM_BUFFER_SIZE]		       \
			SPIM_MEMORY_SECTION(idx);			       \
		 static uint8_t spim_##idx##_rx_buffer			       \
			[CONFIG_SPI_NRFX_RAM_BUFFER_SIZE]		       \
			SPIM_MEMORY_SECTION(idx);))			       \
	static struct spi_nrfx_data spi_##idx##_data = {		       \
		SPI_CONTEXT_INIT_LOCK(spi_##idx##_data, ctx),		       \
		SPI_CONTEXT_INIT_SYNC(spi_##idx##_data, ctx),		       \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(SPIM(idx), ctx)		       \
		IF_ENABLED(SPI_BUFFER_IN_RAM,				       \
			(.tx_buffer = spim_##idx##_tx_buffer,		       \
			 .rx_buffer = spim_##idx##_rx_buffer,))		       \
		.dev  = DEVICE_DT_GET(SPIM(idx)),			       \
		.busy = false,						       \
	};								       \
	PINCTRL_DT_DEFINE(SPIM(idx));					       \
	static const struct spi_nrfx_config spi_##idx##z_config = {	       \
		.spim = {						       \
			.p_reg = (NRF_SPIM_Type *)DT_REG_ADDR(SPIM(idx)),      \
			.drv_inst_idx = NRFX_SPIM##idx##_INST_IDX,	       \
		},							       \
		.max_freq = SPIM_PROP(idx, max_frequency),		       \
		.def_config = {						       \
			.skip_gpio_cfg = true,				       \
			.skip_psel_cfg = true,				       \
			.ss_pin = NRF_SPIM_PIN_NOT_CONNECTED,		       \
			.orc    = SPIM_PROP(idx, overrun_character),	       \
			SPI_NRFX_SPIM_EXTENDED_CONFIG(idx)		       \
		},							       \
		.irq_connect = irq_connect##idx,			       \
		.pcfg = PINCTRL_DT_DEV_CONFIG_GET(SPIM(idx)),		       \
		.max_chunk_len = BIT_MASK(SPIM_PROP(idx, easydma_maxcnt_bits)),\
		COND_CODE_1(CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58,     \
			(.anomaly_58_workaround =			       \
				SPIM_PROP(idx, anomaly_58_workaround),),       \
			())						       \
		.wake_pin = NRF_DT_GPIOS_TO_PSEL_OR(SPIM(idx), wake_gpios,     \
						    WAKE_PIN_NOT_USED),	       \
		.wake_gpiote = WAKE_GPIOTE_INSTANCE(SPIM(idx)),		       \
		IF_ENABLED(CONFIG_DCACHE,				       \
			(.mem_attr = SPIM_GET_MEM_ATTR(idx),))		       \
		IF_ENABLED(USE_CLOCK_REQUESTS,			       \
			(.clk_dev = SPIM_REQUESTS_CLOCK(idx)		       \
				  ? DEVICE_DT_GET(DT_CLOCKS_CTLR(SPIM(idx)))   \
				  : NULL,				       \
			 .clk_spec = {					       \
				.frequency = NRF_CLOCK_CONTROL_FREQUENCY_MAX,  \
			 },))						       \
	};								       \
	BUILD_ASSERT(!SPIM_HAS_PROP(idx, wake_gpios) ||			       \
		     !(DT_GPIO_FLAGS(SPIM(idx), wake_gpios) & GPIO_ACTIVE_LOW),\
		     "WAKE line must be configured as active high");	       \
	PM_DEVICE_DT_DEFINE(SPIM(idx), spim_nrfx_pm_action);		       \
	SPI_DEVICE_DT_DEFINE(SPIM(idx),					       \
		      spi_nrfx_init,					       \
		      PM_DEVICE_DT_GET(SPIM(idx)),			       \
		      &spi_##idx##_data,				       \
		      &spi_##idx##z_config,				       \
		      POST_KERNEL, SPIM_INIT_PRIORITY(idx),		       \
		      &spi_nrfx_driver_api)
852 
/* Place the bounce buffers in the instance's devicetree memory region,
 * if one is specified; otherwise leave placement to the linker defaults.
 */
#define SPIM_MEMORY_SECTION(idx)					       \
	COND_CODE_1(SPIM_HAS_PROP(idx, memory_regions),			       \
		(__attribute__((__section__(LINKER_DT_NODE_REGION_NAME(	       \
			SPIM_MEM_REGION(idx)))))),			       \
		())
858 
/* Instantiate the driver for every SPIM peripheral present on the SoC. */
#ifdef CONFIG_HAS_HW_NRF_SPIM0
SPI_NRFX_SPIM_DEFINE(0);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM1
SPI_NRFX_SPIM_DEFINE(1);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM2
SPI_NRFX_SPIM_DEFINE(2);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM3
SPI_NRFX_SPIM_DEFINE(3);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM4
SPI_NRFX_SPIM_DEFINE(4);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM00
SPI_NRFX_SPIM_DEFINE(00);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM20
SPI_NRFX_SPIM_DEFINE(20);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM21
SPI_NRFX_SPIM_DEFINE(21);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM22
SPI_NRFX_SPIM_DEFINE(22);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM30
SPI_NRFX_SPIM_DEFINE(30);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM120
SPI_NRFX_SPIM_DEFINE(120);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM121
SPI_NRFX_SPIM_DEFINE(121);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM130
SPI_NRFX_SPIM_DEFINE(130);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM131
SPI_NRFX_SPIM_DEFINE(131);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM132
SPI_NRFX_SPIM_DEFINE(132);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM133
SPI_NRFX_SPIM_DEFINE(133);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM134
SPI_NRFX_SPIM_DEFINE(134);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM135
SPI_NRFX_SPIM_DEFINE(135);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM136
SPI_NRFX_SPIM_DEFINE(136);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPIM137
SPI_NRFX_SPIM_DEFINE(137);
#endif
938