1 /*
2  * Copyright (c) 2019-2024, Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DT_DRV_COMPAT nordic_qspi_nor
8 
9 #include <errno.h>
10 #include <zephyr/drivers/flash.h>
11 #include <zephyr/init.h>
12 #include <zephyr/pm/device.h>
13 #include <zephyr/pm/device_runtime.h>
14 #include <zephyr/drivers/pinctrl.h>
15 #include <zephyr/sys/atomic.h>
16 #include <soc.h>
17 #include <string.h>
18 #include <zephyr/logging/log.h>
19 #include <zephyr/irq.h>
20 LOG_MODULE_REGISTER(qspi_nor, CONFIG_FLASH_LOG_LEVEL);
21 
22 #include "spi_nor.h"
23 #include "jesd216.h"
24 #include "flash_priv.h"
25 #include <nrf_erratas.h>
26 #include <nrfx_qspi.h>
27 #include <hal/nrf_clock.h>
28 #include <hal/nrf_gpio.h>
29 
struct qspi_nor_data {
#ifdef CONFIG_MULTITHREADING
	/* The semaphore to control exclusive access to the device. */
	struct k_sem sem;
	/* The semaphore to indicate that transfer has completed. */
	struct k_sem sync;
	/* A counter to control QSPI deactivation. */
	atomic_t usage_count;
#else /* CONFIG_MULTITHREADING */
	/* A flag that signals completed transfer when threads are
	 * not enabled.
	 */
	volatile bool ready;
#endif /* CONFIG_MULTITHREADING */
	/* When true, qspi_acquire()/qspi_release() skip the base clock
	 * divider changes and the peripheral deactivation, so the QSPI
	 * stays configured for XIP accesses.
	 */
	bool xip_enabled;
};
46 
struct qspi_nor_config {
	/* Configuration passed to nrfx_qspi_init(). */
	nrfx_qspi_config_t nrfx_cfg;

	/* Size from devicetree, in bytes */
	uint32_t size;

	/* JEDEC id from devicetree */
	uint8_t id[SPI_NOR_MAX_ID_LEN];

	/* Pin control (pinctrl) configuration for the QSPI signals. */
	const struct pinctrl_dev_config *pcfg;
};
58 
59 /* Status register bits */
60 #define QSPI_SECTOR_SIZE SPI_NOR_SECTOR_SIZE
61 #define QSPI_BLOCK_SIZE SPI_NOR_BLOCK_SIZE
62 
63 /* instance 0 flash size in bytes */
64 #if DT_INST_NODE_HAS_PROP(0, size_in_bytes)
65 #define INST_0_BYTES (DT_INST_PROP(0, size_in_bytes))
66 #elif DT_INST_NODE_HAS_PROP(0, size)
67 #define INST_0_BYTES (DT_INST_PROP(0, size) / 8)
68 #else
69 #error "No size specified. 'size' or 'size-in-bytes' must be set"
70 #endif
71 
72 BUILD_ASSERT(!(DT_INST_NODE_HAS_PROP(0, size_in_bytes) && DT_INST_NODE_HAS_PROP(0, size)),
73 	     "Node " DT_NODE_PATH(DT_DRV_INST(0)) " has both size and size-in-bytes "
74 	     "properties; use exactly one");
75 
76 
77 #define INST_0_SCK_FREQUENCY DT_INST_PROP(0, sck_frequency)
78 /*
79  * According to the respective specifications, the nRF52 QSPI supports clock
80  * frequencies 2 - 32 MHz and the nRF53 one supports 6 - 96 MHz.
81  */
82 BUILD_ASSERT(INST_0_SCK_FREQUENCY >= (NRF_QSPI_BASE_CLOCK_FREQ / 16),
83 	     "Unsupported SCK frequency.");
84 
85 /*
86  * Determine a configuration value (INST_0_SCK_CFG) and, if needed, a divider
87  * (BASE_CLOCK_DIV) for the clock from which the SCK frequency is derived that
88  * need to be used to achieve the SCK frequency as close as possible (but not
89  * higher) to the one specified in DT.
90  */
91 #if defined(CONFIG_SOC_SERIES_NRF53X)
92 /*
93  * On nRF53 Series SoCs, the default /4 divider for the HFCLK192M clock can
94  * only be used when the QSPI peripheral is idle. When a QSPI operation is
95  * performed, the divider needs to be changed to /1 or /2 (particularly,
96  * the specification says that the peripheral "supports 192 MHz and 96 MHz
97  * PCLK192M frequency"), but after that operation is complete, the default
98  * divider needs to be restored to avoid increased current consumption.
99  */
100 #if (INST_0_SCK_FREQUENCY >= NRF_QSPI_BASE_CLOCK_FREQ)
101 /* For requested SCK >= 96 MHz, use HFCLK192M / 1 / (2*1) = 96 MHz */
102 #define BASE_CLOCK_DIV NRF_CLOCK_HFCLK_DIV_1
103 #define INST_0_SCK_CFG NRF_QSPI_FREQ_DIV1
104 /* If anomaly 159 is to be prevented, only /1 divider can be used. */
105 #elif NRF53_ERRATA_159_ENABLE_WORKAROUND
106 #define BASE_CLOCK_DIV NRF_CLOCK_HFCLK_DIV_1
107 #define INST_0_SCK_CFG (DIV_ROUND_UP(NRF_QSPI_BASE_CLOCK_FREQ, \
108 				     INST_0_SCK_FREQUENCY) - 1)
109 #elif (INST_0_SCK_FREQUENCY >= (NRF_QSPI_BASE_CLOCK_FREQ / 2))
110 /* For 96 MHz > SCK >= 48 MHz, use HFCLK192M / 2 / (2*1) = 48 MHz */
111 #define BASE_CLOCK_DIV NRF_CLOCK_HFCLK_DIV_2
112 #define INST_0_SCK_CFG NRF_QSPI_FREQ_DIV1
113 #elif (INST_0_SCK_FREQUENCY >= (NRF_QSPI_BASE_CLOCK_FREQ / 3))
114 /* For 48 MHz > SCK >= 32 MHz, use HFCLK192M / 1 / (2*3) = 32 MHz */
115 #define BASE_CLOCK_DIV NRF_CLOCK_HFCLK_DIV_1
116 #define INST_0_SCK_CFG NRF_QSPI_FREQ_DIV3
117 #else
118 /* For requested SCK < 32 MHz, use divider /2 for HFCLK192M. */
119 #define BASE_CLOCK_DIV NRF_CLOCK_HFCLK_DIV_2
120 #define INST_0_SCK_CFG (DIV_ROUND_UP(NRF_QSPI_BASE_CLOCK_FREQ / 2, \
121 				     INST_0_SCK_FREQUENCY) - 1)
122 #endif
123 /* After the base clock divider is changed, some time is needed for the new
124  * setting to take effect. This value specifies the delay (in microseconds)
125  * to be applied to ensure that the clock is ready when the QSPI operation
126  * starts. It was measured with a logic analyzer (unfortunately, the nRF5340
127  * specification does not provide any numbers in this regard).
128  */
129 #define BASE_CLOCK_SWITCH_DELAY_US 7
130 
131 #else
132 /*
133  * On nRF52 Series SoCs, the base clock divider is not configurable,
134  * so BASE_CLOCK_DIV is not defined.
135  */
136 #if (INST_0_SCK_FREQUENCY >= NRF_QSPI_BASE_CLOCK_FREQ)
137 #define INST_0_SCK_CFG NRF_QSPI_FREQ_DIV1
138 #else
139 #define INST_0_SCK_CFG (DIV_ROUND_UP(NRF_QSPI_BASE_CLOCK_FREQ, \
140 					 INST_0_SCK_FREQUENCY) - 1)
141 
142 #endif
143 
144 #endif /* defined(CONFIG_SOC_SERIES_NRF53X) */
145 
146 /* 0 for MODE0 (CPOL=0, CPHA=0), 1 for MODE3 (CPOL=1, CPHA=1). */
147 #define INST_0_SPI_MODE DT_INST_PROP(0, cpol)
148 BUILD_ASSERT(DT_INST_PROP(0, cpol) == DT_INST_PROP(0, cpha),
149 	     "Invalid combination of \"cpol\" and \"cpha\" properties.");
150 
151 /* for accessing devicetree properties of the bus node */
152 #define QSPI_NODE DT_INST_BUS(0)
153 #define QSPI_PROP_AT(prop, idx) DT_PROP_BY_IDX(QSPI_NODE, prop, idx)
154 #define QSPI_PROP_LEN(prop) DT_PROP_LEN(QSPI_NODE, prop)
155 
156 #define INST_0_QER _CONCAT(JESD216_DW15_QER_VAL_, \
157 			   DT_STRING_TOKEN(DT_DRV_INST(0), \
158 					   quad_enable_requirements))
159 
160 #define IS_EQUAL(x, y) ((x) == (y))
161 #define SR1_WRITE_CLEARS_SR2 IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v1)
162 
163 #define SR2_WRITE_NEEDS_SR1  (IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v1) || \
164 			      IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v4) || \
165 			      IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v5))
166 
167 #define QER_IS_S2B1 (IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v1) || \
168 		     IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v4) || \
169 		     IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v5) || \
170 		     IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v6))
171 
172 BUILD_ASSERT((IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_NONE)
173 	      || IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S1B6)
174 	      || IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v1)
175 	      || IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v4)
176 	      || IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v5)
177 	      || IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v6)),
178 	     "Driver only supports NONE, S1B6, S2B1v1, S2B1v4, S2B1v5 or S2B1v6 for quad-enable-requirements");
179 
180 #define INST_0_4BA DT_INST_PROP_OR(0, enter_4byte_addr, 0)
181 #if (INST_0_4BA != 0)
182 BUILD_ASSERT(((INST_0_4BA & 0x03) != 0),
183 	     "Driver only supports command (0xB7) for entering 4 byte addressing mode");
184 BUILD_ASSERT(DT_INST_PROP(0, address_size_32),
185 	    "After entering 4 byte addressing mode, 4 byte addressing is expected");
186 #endif
187 
188 void z_impl_nrf_qspi_nor_xip_enable(const struct device *dev, bool enable);
189 void z_vrfy_nrf_qspi_nor_xip_enable(const struct device *dev, bool enable);
190 
191 #define WORD_SIZE 4
192 
193 /**
194  * @brief QSPI buffer structure
195  * Structure used both for TX and RX purposes.
196  *
197  * @param buf is a valid pointer to a data buffer.
198  * Can not be NULL.
199  * @param len is the length of the data to be handled.
200  * If no data to transmit/receive - pass 0.
201  */
202 struct qspi_buf {
203 	uint8_t *buf;
204 	size_t len;
205 };
206 
207 /**
208  * @brief QSPI command structure
209  * Structure used for custom command usage.
210  *
211  * @param op_code is a command value (i.e 0x9F - get Jedec ID)
212  * @param tx_buf structure used for TX purposes. Can be NULL if not used.
213  * @param rx_buf structure used for RX purposes. Can be NULL if not used.
214  */
215 struct qspi_cmd {
216 	uint8_t op_code;
217 	const struct qspi_buf *tx_buf;
218 	const struct qspi_buf *rx_buf;
219 };
220 
221 static int qspi_nor_write_protection_set(const struct device *dev,
222 					 bool write_protect);
223 
224 static int exit_dpd(const struct device *const dev);
225 
226 /**
227  * @brief Test whether offset is aligned.
228  */
229 #define QSPI_IS_SECTOR_ALIGNED(_ofs) (((_ofs) & (QSPI_SECTOR_SIZE - 1U)) == 0)
230 #define QSPI_IS_BLOCK_ALIGNED(_ofs) (((_ofs) & (QSPI_BLOCK_SIZE - 1U)) == 0)
231 
232 /**
233  * @brief Converts NRFX return codes to the zephyr ones
234  */
/**
 * @brief Converts NRFX return codes to the zephyr ones
 */
static inline int qspi_get_zephyr_ret_code(nrfx_err_t res)
{
	if (res == NRFX_SUCCESS) {
		return 0;
	}

	if ((res == NRFX_ERROR_INVALID_PARAM) ||
	    (res == NRFX_ERROR_INVALID_ADDR)) {
		return -EINVAL;
	}

	if (res == NRFX_ERROR_INVALID_STATE) {
		return -ECANCELED;
	}

#if NRF53_ERRATA_159_ENABLE_WORKAROUND
	if (res == NRFX_ERROR_FORBIDDEN) {
		LOG_ERR("nRF5340 anomaly 159 conditions detected");
		LOG_ERR("Set the CPU clock to 64 MHz before starting QSPI operation");
		return -ECANCELED;
	}
#endif

	/* NRFX_ERROR_BUSY, NRFX_ERROR_TIMEOUT and anything unexpected. */
	return -EBUSY;
}
257 
static inline void qspi_lock(const struct device *dev)
{
#ifdef CONFIG_MULTITHREADING
	struct qspi_nor_data *dev_data = dev->data;

	/* Block until exclusive access to the driver is granted. */
	k_sem_take(&dev_data->sem, K_FOREVER);
#endif
}
266 
static inline void qspi_unlock(const struct device *dev)
{
#ifdef CONFIG_MULTITHREADING
	struct qspi_nor_data *dev_data = dev->data;

	/* Release the exclusive-access semaphore taken in qspi_lock(). */
	k_sem_give(&dev_data->sem);
#endif
}
275 
static inline void qspi_clock_div_change(void)
{
#ifdef CONFIG_SOC_SERIES_NRF53X
	/* Make sure the base clock divider is changed accordingly
	 * before a QSPI transfer is performed.
	 */
	nrf_clock_hfclk192m_div_set(NRF_CLOCK, BASE_CLOCK_DIV);
	/* Give the new divider setting time to take effect (see the
	 * comment at BASE_CLOCK_SWITCH_DELAY_US).
	 */
	k_busy_wait(BASE_CLOCK_SWITCH_DELAY_US);
#endif
}
286 
static inline void qspi_clock_div_restore(void)
{
#ifdef CONFIG_SOC_SERIES_NRF53X
	/* Restore the default base clock divider to reduce power
	 * consumption when the QSPI peripheral is idle.
	 */
	nrf_clock_hfclk192m_div_set(NRF_CLOCK, NRF_CLOCK_HFCLK_DIV_4);
#endif
}
296 
static void qspi_acquire(const struct device *dev)
{
	struct qspi_nor_data *dev_data = dev->data;
	int rc;

	/* Resume the device via runtime PM before touching the hardware. */
	rc = pm_device_runtime_get(dev);
	if (rc < 0) {
		LOG_ERR("pm_device_runtime_get failed: %d", rc);
	}
#if defined(CONFIG_MULTITHREADING)
	/* In multithreading, the driver can call qspi_acquire more than once
	 * before calling qspi_release. Keeping count, so QSPI is deactivated
	 * only at the last call (usage_count == 0).
	 */
	atomic_inc(&dev_data->usage_count);
#endif

	qspi_lock(dev);

	if (!dev_data->xip_enabled) {
		/* Raise the base clock rate for the transfer (no-op on
		 * nRF52) and block system-level suspension while busy.
		 */
		qspi_clock_div_change();

		pm_device_busy_set(dev);
	}
}
322 
static void qspi_release(const struct device *dev)
{
	struct qspi_nor_data *dev_data = dev->data;
	bool deactivate = true;
	int rc;

#if defined(CONFIG_MULTITHREADING)
	/* The last thread to finish using the driver deactivates the QSPI */
	deactivate = atomic_dec(&dev_data->usage_count) == 1;
#endif

	if (!dev_data->xip_enabled) {
		/* Counterpart of the steps done in qspi_acquire(). */
		qspi_clock_div_restore();

		if (deactivate) {
			(void) nrfx_qspi_deactivate();
		}

		pm_device_busy_clear(dev);
	}

	qspi_unlock(dev);

	/* Balance the pm_device_runtime_get() from qspi_acquire(). */
	rc = pm_device_runtime_put(dev);
	if (rc < 0) {
		LOG_ERR("pm_device_runtime_put failed: %d", rc);
	}
}
351 
static inline void qspi_wait_for_completion(const struct device *dev,
					    nrfx_err_t res)
{
	struct qspi_nor_data *dev_data = dev->data;

	/* Wait only if the operation was actually started successfully. */
	if (res == NRFX_SUCCESS) {
#ifdef CONFIG_MULTITHREADING
		/* Sleep until qspi_complete() gives the semaphore. */
		k_sem_take(&dev_data->sync, K_FOREVER);
#else /* CONFIG_MULTITHREADING */
		unsigned int key = irq_lock();

		/* k_cpu_atomic_idle() atomically re-enables interrupts while
		 * idling; they are locked again before `ready` is re-checked,
		 * so the flag test is race-free against the ISR.
		 */
		while (!dev_data->ready) {
			k_cpu_atomic_idle(key);
			key = irq_lock();
		}
		dev_data->ready = false;
		irq_unlock(key);
#endif /* CONFIG_MULTITHREADING */
	}
}
372 
static inline void qspi_complete(struct qspi_nor_data *dev_data)
{
	/* Signal qspi_wait_for_completion(); invoked from the QSPI event
	 * handler (see qspi_handler()).
	 */
#ifdef CONFIG_MULTITHREADING
	k_sem_give(&dev_data->sync);
#else /* CONFIG_MULTITHREADING */
	dev_data->ready = true;
#endif /* CONFIG_MULTITHREADING */
}
381 
382 /**
383  * @brief QSPI handler
384  *
385  * @param event Driver event type
386  * @param p_context Pointer to context. Use in interrupt handler.
387  * @retval None
388  */
/**
 * @brief QSPI driver event handler.
 *
 * Signals transfer completion to the waiting context.
 *
 * @param event Driver event type
 * @param p_context Pointer to the driver data (struct qspi_nor_data).
 */
static void qspi_handler(nrfx_qspi_evt_t event, void *p_context)
{
	struct qspi_nor_data *data = p_context;

	if (event != NRFX_QSPI_EVENT_DONE) {
		return;
	}

	qspi_complete(data);
}
397 
398 /* QSPI send custom command.
399  *
400  * If this is used for both send and receive the buffer sizes must be
401  * equal and cover the whole transaction.
402  */
/* QSPI send custom command.
 *
 * If this is used for both send and receive the buffer sizes must be
 * equal and cover the whole transaction.
 *
 * @param dev  Flash device.
 * @param cmd  Command descriptor; must not be NULL.
 * @param wren Send a write-enable before the instruction when true.
 * @return 0 on success, negative errno otherwise.
 */
static int qspi_send_cmd(const struct device *dev, const struct qspi_cmd *cmd,
			 bool wren)
{
	if (cmd == NULL) {
		return -EINVAL;
	}

	const void *tx_data = (cmd->tx_buf != NULL) ? cmd->tx_buf->buf : NULL;
	size_t tx_size = (cmd->tx_buf != NULL) ? cmd->tx_buf->len : 0;
	void *rx_data = (cmd->rx_buf != NULL) ? cmd->rx_buf->buf : NULL;
	size_t rx_size = (cmd->rx_buf != NULL) ? cmd->rx_buf->len : 0;
	/* The opcode byte always contributes to the frame length. */
	size_t total_len = sizeof(cmd->op_code);

	if ((tx_size != 0) && (rx_size != 0)) {
		/* Combined TX/RX must cover the same span. */
		if (tx_size != rx_size) {
			return -EINVAL;
		}

		total_len += tx_size;
	} else {
		/* At least one of these is zero. */
		total_len += tx_size + rx_size;
	}

	if (total_len > NRF_QSPI_CINSTR_LEN_9B) {
		LOG_WRN("cinstr %02x transfer too long: %zu",
			cmd->op_code, total_len);
		return -EINVAL;
	}

	nrf_qspi_cinstr_conf_t cinstr_cfg = {
		.opcode = cmd->op_code,
		.length = total_len,
		.io2_level = true,
		.io3_level = true,
		.wipwait = false,
		.wren = wren,
	};

	int res = nrfx_qspi_cinstr_xfer(&cinstr_cfg, tx_data, rx_data);

	return qspi_get_zephyr_ret_code(res);
}
457 
458 #if !IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_NONE)
459 /* RDSR.  Negative value is error. */
qspi_rdsr(const struct device * dev,uint8_t sr_num)460 static int qspi_rdsr(const struct device *dev, uint8_t sr_num)
461 {
462 	uint8_t opcode = SPI_NOR_CMD_RDSR;
463 
464 	if (sr_num > 2 || sr_num == 0) {
465 		return -EINVAL;
466 	}
467 	if (sr_num == 2) {
468 		opcode = SPI_NOR_CMD_RDSR2;
469 	}
470 	uint8_t sr = 0xFF;
471 	const struct qspi_buf sr_buf = {
472 		.buf = &sr,
473 		.len = sizeof(sr),
474 	};
475 	struct qspi_cmd cmd = {
476 		.op_code = opcode,
477 		.rx_buf = &sr_buf,
478 	};
479 	int rc = qspi_send_cmd(dev, &cmd, false);
480 
481 	return (rc < 0) ? rc : sr;
482 }
483 
484 /* Wait until RDSR confirms write is not in progress. */
/* Wait until RDSR confirms write is not in progress. */
static int qspi_wait_while_writing(const struct device *dev, k_timeout_t poll_period)
{
	int rc;

	for (;;) {
#ifdef CONFIG_MULTITHREADING
		/* Yield between polls unless the caller asked for
		 * busy-polling (K_NO_WAIT).
		 */
		if (!K_TIMEOUT_EQ(poll_period, K_NO_WAIT)) {
			k_sleep(poll_period);
		}
#endif
		rc = qspi_rdsr(dev, 1);
		if ((rc < 0) || ((rc & SPI_NOR_WIP_BIT) == 0U)) {
			break;
		}
	}

	return (rc < 0) ? rc : 0;
}
501 
static int qspi_wrsr(const struct device *dev, uint8_t sr_val, uint8_t sr_num)
{
	int rc = 0;
	uint8_t opcode = SPI_NOR_CMD_WRSR;
	uint8_t length = 1;
	uint8_t sr_array[2] = {0};

	/* Only status registers 1 and 2 are supported. */
	if (sr_num > 2 || sr_num == 0) {
		return -EINVAL;
	}

	if (sr_num == 1) {
		sr_array[0] = sr_val;
#if SR1_WRITE_CLEARS_SR2
		/* Writing sr1 clears sr2. need to read/modify/write both. */
		rc = qspi_rdsr(dev, 2);
		if (rc < 0) {
			LOG_ERR("RDSR for WRSR failed: %d", rc);
			return rc;
		}
		sr_array[1] = rc;
		length = 2;
#endif
	} else { /* sr_num == 2 */

#if SR2_WRITE_NEEDS_SR1
		/* Writing sr2 requires writing sr1 as well.
		 * Uses standard WRSR opcode
		 */
		sr_array[1] = sr_val;
		rc = qspi_rdsr(dev, 1);
		if (rc < 0) {
			LOG_ERR("RDSR for WRSR failed: %d", rc);
			return rc;
		}
		sr_array[0] = rc;
		length = 2;
#elif IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v6)
		/* Writing sr2 uses a dedicated WRSR2 command */
		sr_array[0] = sr_val;
		opcode = SPI_NOR_CMD_WRSR2;
#else
		LOG_ERR("Attempted to write status register 2, but no known method to write sr2");
		return -EINVAL;
#endif
	}

	const struct qspi_buf sr_buf = {
		.buf = sr_array,
		.len = length,
	};
	struct qspi_cmd cmd = {
		.op_code = opcode,
		.tx_buf = &sr_buf,
	};

	/* WRSR must be preceded by a write enable, hence wren = true. */
	rc = qspi_send_cmd(dev, &cmd, true);

	/* Writing SR can take some time, and further
	 * commands sent while it's happening can be
	 * corrupted.  Wait.
	 */
	if (rc == 0) {
		rc = qspi_wait_while_writing(dev, K_NO_WAIT);
	}

	return rc;
}
570 #endif /* !IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_NONE) */
571 
572 /* QSPI erase */
/* QSPI erase */
static int qspi_erase(const struct device *dev, uint32_t addr, uint32_t size)
{
	const struct qspi_nor_config *params = dev->config;
	int rc, rc2;

	/* Issue WREN (write enable) before erasing. */
	rc = qspi_nor_write_protection_set(dev, false);
	if (rc != 0) {
		return rc;
	}
	/* Use the largest erase operation that fits the remaining aligned
	 * region: whole chip, 64 kB block, or 4 kB sector.
	 */
	while (size > 0) {
		nrfx_err_t res = !NRFX_SUCCESS;
		uint32_t adj = 0;

		if (size == params->size) {
			/* chip erase */
			res = nrfx_qspi_chip_erase();
			adj = size;
		} else if ((size >= QSPI_BLOCK_SIZE) &&
			   QSPI_IS_BLOCK_ALIGNED(addr)) {
			/* 64 kB block erase */
			res = nrfx_qspi_erase(NRF_QSPI_ERASE_LEN_64KB, addr);
			adj = QSPI_BLOCK_SIZE;
		} else if ((size >= QSPI_SECTOR_SIZE) &&
			   QSPI_IS_SECTOR_ALIGNED(addr)) {
			/* 4kB sector erase */
			res = nrfx_qspi_erase(NRF_QSPI_ERASE_LEN_4KB, addr);
			adj = QSPI_SECTOR_SIZE;
		} else {
			/* minimal erase size is at least a sector size */
			LOG_ERR("unsupported at 0x%lx size %zu", (long)addr, size);
			res = NRFX_ERROR_INVALID_PARAM;
		}

		qspi_wait_for_completion(dev, res);
		if (res == NRFX_SUCCESS) {
			addr += adj;
			size -= adj;

			/* Erasing flash pages takes a significant period of time and the
			 * flash memory is unavailable to perform additional operations
			 * until done.
			 */
			rc = qspi_wait_while_writing(dev, K_MSEC(10));
			if (rc < 0) {
				LOG_ERR("wait error at 0x%lx size %zu", (long)addr, size);
				break;
			}
		} else {
			LOG_ERR("erase error at 0x%lx size %zu", (long)addr, size);
			rc = qspi_get_zephyr_ret_code(res);
			break;
		}
	}

	/* Restore write protection regardless of the erase outcome. */
	rc2 = qspi_nor_write_protection_set(dev, true);

	return rc != 0 ? rc : rc2;
}
631 
static int configure_chip(const struct device *dev)
{
	const struct qspi_nor_config *dev_config = dev->config;
	int rc = 0;

	/* Set QE to match transfer mode.  If not using quad
	 * it's OK to leave QE set, but doing so prevents use
	 * of WP#/RESET#/HOLD# which might be useful.
	 *
	 * Note build assert above ensures QER is S1B6 or
	 * S2B1v1/4/5/6. Other options require more logic.
	 */
#if !IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_NONE)
		nrf_qspi_prot_conf_t const *prot_if =
			&dev_config->nrfx_cfg.prot_if;
		/* QE is required when any of these quad (or READ2IO) opcodes
		 * is configured.
		 */
		bool qe_value = (prot_if->writeoc == NRF_QSPI_WRITEOC_PP4IO) ||
				(prot_if->writeoc == NRF_QSPI_WRITEOC_PP4O)  ||
				(prot_if->readoc == NRF_QSPI_READOC_READ4IO) ||
				(prot_if->readoc == NRF_QSPI_READOC_READ4O)  ||
				(prot_if->readoc == NRF_QSPI_READOC_READ2IO);
		uint8_t sr_num = 0;
		uint8_t qe_mask = 0;

#if IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S1B6)
		/* QE is bit 6 of status register 1. */
		sr_num = 1;
		qe_mask = BIT(6);
#elif QER_IS_S2B1
		/* QE is bit 1 of status register 2. */
		sr_num = 2;
		qe_mask = BIT(1);
#else
		LOG_ERR("Unsupported QER type");
		return -EINVAL;
#endif

		rc = qspi_rdsr(dev, sr_num);
		if (rc < 0) {
			LOG_ERR("RDSR failed: %d", rc);
			return rc;
		}

		uint8_t sr = (uint8_t)rc;
		bool qe_state = ((sr & qe_mask) != 0U);

		LOG_DBG("RDSR %02x QE %d need %d: %s", sr, qe_state, qe_value,
			(qe_state != qe_value) ? "updating" : "no-change");

		rc = 0;
		/* Flip QE only when the current state differs from the
		 * required one, to avoid a needless (slow) WRSR.
		 */
		if (qe_state != qe_value) {
			sr ^= qe_mask;
			rc = qspi_wrsr(dev, sr, sr_num);
		}

		if (rc < 0) {
			LOG_ERR("QE %s failed: %d", qe_value ? "set" : "clear",
				rc);
			return rc;
		}
#endif

	if (INST_0_4BA != 0) {
		struct qspi_cmd cmd = {
			.op_code = SPI_NOR_CMD_4BA,
		};

		/* Call will send write enable before instruction if that
		 * requirement is encoded in INST_0_4BA.
		 */
		rc = qspi_send_cmd(dev, &cmd, (INST_0_4BA & 0x02));

		if (rc < 0) {
			LOG_ERR("E4BA cmd issue failed: %d.", rc);
		} else {
			LOG_DBG("E4BA cmd issued.");
		}
	}

	return rc;
}
710 
/* Read the 3-byte JEDEC ID (RDID, 0x9F) into `id`. */
static int qspi_rdid(const struct device *dev, uint8_t *id)
{
	const struct qspi_buf id_buf = {
		.buf = id,
		.len = 3
	};
	const struct qspi_cmd cmd = {
		.op_code = SPI_NOR_CMD_RDID,
		.rx_buf = &id_buf,
	};

	return qspi_send_cmd(dev, &cmd, false);
}
724 
725 #if defined(CONFIG_FLASH_JESD216_API)
726 
/* JESD216 API: read the JEDEC ID with the peripheral acquired. */
static int qspi_read_jedec_id(const struct device *dev, uint8_t *id)
{
	int ret;

	qspi_acquire(dev);
	ret = qspi_rdid(dev, id);
	qspi_release(dev);

	return ret;
}
739 
static int qspi_sfdp_read(const struct device *dev, off_t offset,
			  void *data, size_t len)
{
	__ASSERT(data != NULL, "null destination");

	/* 24-bit SFDP address, most significant byte first, followed by
	 * one dummy (wait-state) byte.
	 */
	uint8_t addr_buf[] = {
		offset >> 16,
		offset >> 8,
		offset,
		0,		/* wait state */
	};
	nrf_qspi_cinstr_conf_t cinstr_cfg = {
		.opcode = JESD216_CMD_READ_SFDP,
		.length = NRF_QSPI_CINSTR_LEN_1B,
		.io2_level = true,
		.io3_level = true,
	};
	nrfx_err_t res;

	qspi_acquire(dev);

	/* Use long frame mode: the opcode and address are sent first,
	 * then the data is read in a final transfer that ends the frame.
	 */
	res = nrfx_qspi_lfm_start(&cinstr_cfg);
	if (res != NRFX_SUCCESS) {
		LOG_DBG("lfm_start: %x", res);
		goto out;
	}

	res = nrfx_qspi_lfm_xfer(addr_buf, NULL, sizeof(addr_buf), false);
	if (res != NRFX_SUCCESS) {
		LOG_DBG("lfm_xfer addr: %x", res);
		goto out;
	}

	res = nrfx_qspi_lfm_xfer(NULL, data, len, true);
	if (res != NRFX_SUCCESS) {
		LOG_DBG("lfm_xfer read: %x", res);
		goto out;
	}

out:
	qspi_release(dev);

	return qspi_get_zephyr_ret_code(res);
}
784 
785 #endif /* CONFIG_FLASH_JESD216_API */
786 
/* Read `size` bytes from flash at `addr` into `dest`, working around the
 * peripheral's word-size/word-alignment requirements on both the flash
 * address and the RAM buffer.
 */
static inline nrfx_err_t read_non_aligned(const struct device *dev,
					  off_t addr,
					  void *dest, size_t size)
{
	uint8_t __aligned(WORD_SIZE) buf[WORD_SIZE * 2];
	uint8_t *dptr = dest;

	/* Bytes from `addr` up to the next word boundary in flash. */
	off_t flash_prefix = (WORD_SIZE - (addr % WORD_SIZE)) % WORD_SIZE;

	if (flash_prefix > size) {
		flash_prefix = size;
	}

	/* Bytes from `dptr` up to the next word boundary in RAM. */
	off_t dest_prefix = (WORD_SIZE - (off_t)dptr % WORD_SIZE) % WORD_SIZE;

	if (dest_prefix > size) {
		dest_prefix = size;
	}

	/* Split the range into prefix + word-multiple middle + suffix,
	 * for both the flash side and the destination side.
	 */
	off_t flash_suffix = (size - flash_prefix) % WORD_SIZE;
	off_t flash_middle = size - flash_prefix - flash_suffix;
	off_t dest_middle = size - dest_prefix -
			    (size - dest_prefix) % WORD_SIZE;

	/* The aligned middle transfer is limited by whichever side allows
	 * less word-multiple data; the remainder moves to the suffix.
	 */
	if (flash_middle > dest_middle) {
		flash_middle = dest_middle;
		flash_suffix = size - flash_prefix - flash_middle;
	}

	nrfx_err_t res = NRFX_SUCCESS;

	/* read from aligned flash to aligned memory */
	if (flash_middle != 0) {
		res = nrfx_qspi_read(dptr + dest_prefix, flash_middle,
				     addr + flash_prefix);
		qspi_wait_for_completion(dev, res);
		if (res != NRFX_SUCCESS) {
			return res;
		}

		/* perform shift in RAM */
		if (flash_prefix != dest_prefix) {
			memmove(dptr + flash_prefix, dptr + dest_prefix, flash_middle);
		}
	}

	/* read prefix: fetch the whole word containing `addr` (aligned
	 * down) into `buf`, then copy only its trailing bytes.
	 */
	if (flash_prefix != 0) {
		res = nrfx_qspi_read(buf, WORD_SIZE, addr -
				     (WORD_SIZE - flash_prefix));
		qspi_wait_for_completion(dev, res);
		if (res != NRFX_SUCCESS) {
			return res;
		}
		memcpy(dptr, buf + WORD_SIZE - flash_prefix, flash_prefix);
	}

	/* read suffix: fetch up to two words and copy the leading bytes. */
	if (flash_suffix != 0) {
		res = nrfx_qspi_read(buf, WORD_SIZE * 2,
				     addr + flash_prefix + flash_middle);
		qspi_wait_for_completion(dev, res);
		if (res != NRFX_SUCCESS) {
			return res;
		}
		memcpy(dptr + flash_prefix + flash_middle, buf, flash_suffix);
	}

	return res;
}
857 
/* Flash API read: validate arguments, then delegate to read_non_aligned()
 * with the peripheral acquired.
 */
static int qspi_nor_read(const struct device *dev, off_t addr, void *dest,
			 size_t size)
{
	const struct qspi_nor_config *params = dev->config;
	nrfx_err_t res;

	if (dest == NULL) {
		return -EINVAL;
	}

	/* A zero-length read is trivially successful. */
	if (size == 0) {
		return 0;
	}

	/* affected region should be within device */
	if ((addr < 0) || ((addr + size) > params->size)) {
		LOG_ERR("read error: address or size "
			"exceeds expected values."
			"Addr: 0x%lx size %zu", (long)addr, size);
		return -EINVAL;
	}

	qspi_acquire(dev);
	res = read_non_aligned(dev, addr, dest, size);
	qspi_release(dev);

	return qspi_get_zephyr_ret_code(res);
}
890 
891 /* addr aligned, sptr not null, slen less than 4 */
write_sub_word(const struct device * dev,off_t addr,const void * sptr,size_t slen)892 static inline nrfx_err_t write_sub_word(const struct device *dev, off_t addr,
893 					const void *sptr, size_t slen)
894 {
895 	uint8_t __aligned(4) buf[4];
896 	nrfx_err_t res;
897 
898 	/* read out the whole word so that unchanged data can be
899 	 * written back
900 	 */
901 	res = nrfx_qspi_read(buf, sizeof(buf), addr);
902 	qspi_wait_for_completion(dev, res);
903 
904 	if (res == NRFX_SUCCESS) {
905 		memcpy(buf, sptr, slen);
906 		res = nrfx_qspi_write(buf, sizeof(buf), addr);
907 		qspi_wait_for_completion(dev, res);
908 	}
909 
910 	return res;
911 }
912 
913 BUILD_ASSERT((CONFIG_NORDIC_QSPI_NOR_STACK_WRITE_BUFFER_SIZE % 4) == 0,
914 	     "NOR stack buffer must be multiple of 4 bytes");
915 
916 /* If enabled write using a stack-allocated aligned SRAM buffer as
917  * required for DMA transfers by QSPI peripheral.
918  *
919  * If not enabled return the error the peripheral would have produced.
920  */
write_through_buffer(const struct device * dev,off_t addr,const void * sptr,size_t slen)921 static nrfx_err_t write_through_buffer(const struct device *dev, off_t addr,
922 				       const void *sptr, size_t slen)
923 {
924 	nrfx_err_t res = NRFX_SUCCESS;
925 
926 	if (CONFIG_NORDIC_QSPI_NOR_STACK_WRITE_BUFFER_SIZE > 0) {
927 		uint8_t __aligned(4) buf[CONFIG_NORDIC_QSPI_NOR_STACK_WRITE_BUFFER_SIZE];
928 		const uint8_t *sp = sptr;
929 
930 		while ((slen > 0) && (res == NRFX_SUCCESS)) {
931 			size_t len = MIN(slen, sizeof(buf));
932 
933 			memcpy(buf, sp, len);
934 			res = nrfx_qspi_write(buf, len, addr);
935 			qspi_wait_for_completion(dev, res);
936 
937 			if (res == NRFX_SUCCESS) {
938 				slen -= len;
939 				sp += len;
940 				addr += len;
941 			}
942 		}
943 	} else {
944 		res = NRFX_ERROR_INVALID_ADDR;
945 	}
946 	return res;
947 }
948 
static int qspi_nor_write(const struct device *dev, off_t addr,
			  const void *src,
			  size_t size)
{
	const struct qspi_nor_config *params = dev->config;
	int rc, rc2;

	if (!src) {
		return -EINVAL;
	}

	/* write size must be non-zero, less than 4, or a multiple of 4 */
	if ((size == 0)
	    || ((size > 4) && ((size % 4U) != 0))) {
		return -EINVAL;
	}
	/* address must be 4-byte aligned */
	if ((addr % 4U) != 0) {
		return -EINVAL;
	}

	/* affected region should be within device */
	if (addr < 0 ||
	    (addr + size) > params->size) {
		LOG_ERR("write error: address or size "
			"exceeds expected values."
			"Addr: 0x%lx size %zu", (long)addr, size);
		return -EINVAL;
	}

	qspi_acquire(dev);

	/* Send WREN (write enable) before programming. */
	rc = qspi_nor_write_protection_set(dev, false);
	if (rc == 0) {
		nrfx_err_t res;

		/* Sub-word writes use a read-modify-write helper; buffers
		 * that are not in RAM or not word-aligned cannot be used
		 * for DMA directly and are bounced through a stack buffer;
		 * anything else is written directly.
		 */
		if (size < 4U) {
			res = write_sub_word(dev, addr, src, size);
		} else if (!nrfx_is_in_ram(src) ||
			   !nrfx_is_word_aligned(src)) {
			res = write_through_buffer(dev, addr, src, size);
		} else {
			res = nrfx_qspi_write(src, size, addr);
			qspi_wait_for_completion(dev, res);
		}

		rc = qspi_get_zephyr_ret_code(res);
	}

	/* Restore write protection even when the write failed. */
	rc2 = qspi_nor_write_protection_set(dev, true);

	qspi_release(dev);

	return rc != 0 ? rc : rc2;
}
1004 
/* Flash API erase: validate alignment and bounds, then delegate to
 * qspi_erase() with the peripheral acquired.
 */
static int qspi_nor_erase(const struct device *dev, off_t addr, size_t size)
{
	const struct qspi_nor_config *params = dev->config;
	int ret;

	/* Both offset and length must be sector-aligned, and the length
	 * must be non-zero.
	 */
	if (((addr % QSPI_SECTOR_SIZE) != 0) ||
	    (size == 0) || ((size % QSPI_SECTOR_SIZE) != 0)) {
		return -EINVAL;
	}

	/* affected region should be within device */
	if ((addr < 0) || ((addr + size) > params->size)) {
		LOG_ERR("erase error: address or size "
			"exceeds expected values."
			"Addr: 0x%lx size %zu", (long)addr, size);
		return -EINVAL;
	}

	qspi_acquire(dev);
	ret = qspi_erase(dev, addr, size);
	qspi_release(dev);

	return ret;
}
1037 
/* Toggle the flash write protection: WRDI when protecting, WREN when
 * unprotecting. Any command failure is reported uniformly as -EIO.
 */
static int qspi_nor_write_protection_set(const struct device *dev,
					 bool write_protect)
{
	struct qspi_cmd cmd = {
		.op_code = write_protect ? SPI_NOR_CMD_WRDI : SPI_NOR_CMD_WREN,
	};

	return (qspi_send_cmd(dev, &cmd, false) != 0) ? -EIO : 0;
}
1052 
/*
 * Bring up the QSPI peripheral and the flash chip: initialize the nrfx
 * QSPI driver, wake the chip from a possible deep power-down, verify its
 * JEDEC ID against the devicetree value, and apply the chip configuration.
 *
 * @return 0 on success, a negative errno code otherwise.
 */
static int qspi_init(const struct device *dev)
{
	const struct qspi_nor_config *dev_config = dev->config;
	uint8_t id[SPI_NOR_MAX_ID_LEN];
	nrfx_err_t res;
	int rc;

	res = nrfx_qspi_init(&dev_config->nrfx_cfg, qspi_handler, dev->data);
	rc = qspi_get_zephyr_ret_code(res);
	if (rc < 0) {
		return rc;
	}

#if DT_INST_NODE_HAS_PROP(0, rx_delay)
	/* Only set the RX sampling delay when nRF53 erratum 121 does not
	 * apply to this device.
	 */
	if (!nrf53_errata_121()) {
		nrf_qspi_iftiming_set(NRF_QSPI, DT_INST_PROP(0, rx_delay));
	}
#endif

	/* It may happen that after the flash chip was previously put into
	 * the DPD mode, the system was reset but the flash chip was not.
	 * Consequently, the flash chip can be in the DPD mode at this point.
	 * Some flash chips will just exit the DPD mode on the first CS pulse,
	 * but some need to receive the dedicated command to do it, so send it.
	 * This can be the case even if the current image does not have
	 * CONFIG_PM_DEVICE set to enter DPD mode, as a previously executing image
	 * (for example the main image if the currently executing image is the
	 * bootloader) might have set DPD mode before reboot. As a result,
	 * attempt to exit DPD mode regardless of whether CONFIG_PM_DEVICE is set.
	 */
	rc = exit_dpd(dev);
	if (rc < 0) {
		return rc;
	}

	/* Retrieve the Flash JEDEC ID and compare it with the one expected. */
	rc = qspi_rdid(dev, id);
	if (rc < 0) {
		return rc;
	}

	if (memcmp(dev_config->id, id, SPI_NOR_MAX_ID_LEN) != 0) {
		LOG_ERR("JEDEC id [%02x %02x %02x] expect [%02x %02x %02x]",
			id[0], id[1], id[2], dev_config->id[0],
			dev_config->id[1], dev_config->id[2]);
		return -ENODEV;
	}

	/* The chip is correct, it can be configured now. */
	return configure_chip(dev);
}
1104 
/*
 * Device init hook: apply the default pinctrl state, connect the QSPI
 * interrupt, and initialize the peripheral and flash chip via qspi_init().
 * The QSPI base clock divider is changed around the init sequence and
 * restored afterwards (see qspi_clock_div_change/restore earlier in this
 * file).
 *
 * @return 0 on success, a negative errno code otherwise.
 */
static int qspi_nor_init(const struct device *dev)
{
	const struct qspi_nor_config *dev_config = dev->config;
	int rc;

	rc = pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_DEFAULT);
	if (rc < 0) {
		return rc;
	}

	IRQ_CONNECT(DT_IRQN(QSPI_NODE), DT_IRQ(QSPI_NODE, priority),
		    nrfx_isr, nrfx_qspi_irq_handler, 0);

	qspi_clock_div_change();

	rc = qspi_init(dev);

	qspi_clock_div_restore();

	/* Without XIP the peripheral does not need to stay activated while
	 * no transfer is in progress.
	 */
	if (!IS_ENABLED(CONFIG_NORDIC_QSPI_NOR_XIP) && nrfx_qspi_init_check()) {
		(void)nrfx_qspi_deactivate();
	}

#ifdef CONFIG_NORDIC_QSPI_NOR_XIP
	if (rc == 0) {
		/* Enable XIP mode for QSPI NOR flash, this will prevent the
		 * flash from being powered down
		 */
		z_impl_nrf_qspi_nor_xip_enable(dev, true);
	}
#endif

	return rc;
}
1139 
1140 #if defined(CONFIG_FLASH_PAGE_LAYOUT)
1141 
1142 /* instance 0 page count */
1143 #define LAYOUT_PAGES_COUNT (INST_0_BYTES / \
1144 			    CONFIG_NORDIC_QSPI_NOR_FLASH_LAYOUT_PAGE_SIZE)
1145 
1146 BUILD_ASSERT((CONFIG_NORDIC_QSPI_NOR_FLASH_LAYOUT_PAGE_SIZE *
1147 	      LAYOUT_PAGES_COUNT)
1148 	     == INST_0_BYTES,
1149 	     "QSPI_NOR_FLASH_LAYOUT_PAGE_SIZE incompatible with flash size");
1150 
1151 static const struct flash_pages_layout dev_layout = {
1152 	.pages_count = LAYOUT_PAGES_COUNT,
1153 	.pages_size = CONFIG_NORDIC_QSPI_NOR_FLASH_LAYOUT_PAGE_SIZE,
1154 };
1155 #undef LAYOUT_PAGES_COUNT
1156 
/* Flash API page_layout handler: this driver exposes a single uniform
 * region described by dev_layout (dev is unused; single instance).
 */
static void qspi_nor_pages_layout(const struct device *dev,
				  const struct flash_pages_layout **layout,
				  size_t *layout_size)
{
	*layout_size = 1;
	*layout = &dev_layout;
}
1164 #endif /* CONFIG_FLASH_PAGE_LAYOUT */
1165 
1166 static const struct flash_parameters *
qspi_flash_get_parameters(const struct device * dev)1167 qspi_flash_get_parameters(const struct device *dev)
1168 {
1169 	ARG_UNUSED(dev);
1170 
1171 	static const struct flash_parameters qspi_flash_parameters = {
1172 		.write_block_size = 4,
1173 		.erase_value = 0xff,
1174 	};
1175 
1176 	return &qspi_flash_parameters;
1177 }
1178 
/* Flash API get_size handler: report the build-time flash size from
 * devicetree (single-instance driver, dev is unused). Always returns 0.
 */
int qspi_nor_get_size(const struct device *dev, uint64_t *size)
{
	ARG_UNUSED(dev);

	*size = (uint64_t)INST_0_BYTES;

	return 0;
}
1187 
/* Flash driver API vtable registered for this device. */
static DEVICE_API(flash, qspi_nor_api) = {
	.read = qspi_nor_read,
	.write = qspi_nor_write,
	.erase = qspi_nor_erase,
	.get_parameters = qspi_flash_get_parameters,
	.get_size = qspi_nor_get_size,
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	.page_layout = qspi_nor_pages_layout,
#endif
#if defined(CONFIG_FLASH_JESD216_API)
	.sfdp_read = qspi_sfdp_read,
	.read_jedec_id = qspi_read_jedec_id,
#endif /* CONFIG_FLASH_JESD216_API */
};
1202 
1203 #ifdef CONFIG_PM_DEVICE
enter_dpd(const struct device * const dev)1204 static int enter_dpd(const struct device *const dev)
1205 {
1206 	if (IS_ENABLED(DT_INST_PROP(0, has_dpd))) {
1207 		struct qspi_cmd cmd = {
1208 			.op_code = SPI_NOR_CMD_DPD,
1209 		};
1210 		uint32_t t_enter_dpd = DT_INST_PROP_OR(0, t_enter_dpd, 0);
1211 		int rc;
1212 
1213 		rc = qspi_send_cmd(dev, &cmd, false);
1214 		if (rc < 0) {
1215 			return rc;
1216 		}
1217 
1218 		if (t_enter_dpd) {
1219 			uint32_t t_enter_dpd_us =
1220 				DIV_ROUND_UP(t_enter_dpd, NSEC_PER_USEC);
1221 
1222 			k_busy_wait(t_enter_dpd_us);
1223 		}
1224 	}
1225 
1226 	return 0;
1227 }
1228 #endif /* CONFIG_PM_DEVICE */
1229 
exit_dpd(const struct device * const dev)1230 static int exit_dpd(const struct device *const dev)
1231 {
1232 	if (IS_ENABLED(DT_INST_PROP(0, has_dpd))) {
1233 		nrf_qspi_pins_t pins;
1234 		nrf_qspi_pins_t disconnected_pins = {
1235 			.sck_pin = NRF_QSPI_PIN_NOT_CONNECTED,
1236 			.csn_pin = NRF_QSPI_PIN_NOT_CONNECTED,
1237 			.io0_pin = NRF_QSPI_PIN_NOT_CONNECTED,
1238 			.io1_pin = NRF_QSPI_PIN_NOT_CONNECTED,
1239 			.io2_pin = NRF_QSPI_PIN_NOT_CONNECTED,
1240 			.io3_pin = NRF_QSPI_PIN_NOT_CONNECTED,
1241 		};
1242 		struct qspi_cmd cmd = {
1243 			.op_code = SPI_NOR_CMD_RDPD,
1244 		};
1245 		uint32_t t_exit_dpd = DT_INST_PROP_OR(0, t_exit_dpd, 0);
1246 		nrfx_err_t res;
1247 		int rc;
1248 
1249 		nrf_qspi_pins_get(NRF_QSPI, &pins);
1250 		nrf_qspi_pins_set(NRF_QSPI, &disconnected_pins);
1251 		res = nrfx_qspi_activate(true);
1252 		nrf_qspi_pins_set(NRF_QSPI, &pins);
1253 
1254 		if (res != NRFX_SUCCESS) {
1255 			return -EIO;
1256 		}
1257 
1258 		rc = qspi_send_cmd(dev, &cmd, false);
1259 		if (rc < 0) {
1260 			return rc;
1261 		}
1262 
1263 		if (t_exit_dpd) {
1264 			uint32_t t_exit_dpd_us =
1265 				DIV_ROUND_UP(t_exit_dpd, NSEC_PER_USEC);
1266 
1267 			k_busy_wait(t_exit_dpd_us);
1268 		}
1269 	}
1270 
1271 	return 0;
1272 }
1273 
1274 #ifdef CONFIG_PM_DEVICE
qspi_suspend(const struct device * dev)1275 static int qspi_suspend(const struct device *dev)
1276 {
1277 	const struct qspi_nor_config *dev_config = dev->config;
1278 	nrfx_err_t res;
1279 	int rc;
1280 
1281 	res = nrfx_qspi_mem_busy_check();
1282 	if (res != NRFX_SUCCESS) {
1283 		return -EBUSY;
1284 	}
1285 
1286 	rc = enter_dpd(dev);
1287 	if (rc < 0) {
1288 		return rc;
1289 	}
1290 
1291 	nrfx_qspi_uninit();
1292 
1293 	return pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_SLEEP);
1294 }
1295 
qspi_resume(const struct device * dev)1296 static int qspi_resume(const struct device *dev)
1297 {
1298 	const struct qspi_nor_config *dev_config = dev->config;
1299 	nrfx_err_t res;
1300 	int rc;
1301 
1302 	rc = pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_DEFAULT);
1303 	if (rc < 0) {
1304 		return rc;
1305 	}
1306 
1307 	res = nrfx_qspi_init(&dev_config->nrfx_cfg, qspi_handler, dev->data);
1308 	if (res != NRFX_SUCCESS) {
1309 		return -EIO;
1310 	}
1311 
1312 	return exit_dpd(dev);
1313 }
1314 
qspi_nor_pm_action(const struct device * dev,enum pm_device_action action)1315 static int qspi_nor_pm_action(const struct device *dev,
1316 			      enum pm_device_action action)
1317 {
1318 	int rc;
1319 
1320 	if (pm_device_is_busy(dev)) {
1321 		return -EBUSY;
1322 	}
1323 
1324 	qspi_lock(dev);
1325 	qspi_clock_div_change();
1326 
1327 	switch (action) {
1328 	case PM_DEVICE_ACTION_SUSPEND:
1329 		rc = qspi_suspend(dev);
1330 		break;
1331 
1332 	case PM_DEVICE_ACTION_RESUME:
1333 		rc = qspi_resume(dev);
1334 		break;
1335 
1336 	default:
1337 		rc = -ENOTSUP;
1338 	}
1339 
1340 	qspi_clock_div_restore();
1341 	qspi_unlock(dev);
1342 
1343 	return rc;
1344 }
1345 #endif /* CONFIG_PM_DEVICE */
1346 
/*
 * Enable or disable XIP access to the QSPI flash. While XIP is enabled
 * the flash is kept from being powered down (see the comment in
 * qspi_nor_init()). No-op if the requested state is already set.
 */
void z_impl_nrf_qspi_nor_xip_enable(const struct device *dev, bool enable)
{
	struct qspi_nor_data *dev_data = dev->data;

	if (dev_data->xip_enabled == enable) {
		return;
	}

	qspi_acquire(dev);

#if NRF_QSPI_HAS_XIPEN
	nrf_qspi_xip_set(NRF_QSPI, enable);
#endif
	if (enable) {
		/* Make sure the peripheral is activated before XIP accesses
		 * start; the result is intentionally ignored.
		 */
		(void)nrfx_qspi_activate(false);
	}
	dev_data->xip_enabled = enable;

	qspi_release(dev);
}
1367 
1368 #ifdef CONFIG_USERSPACE
1369 #include <zephyr/internal/syscall_handler.h>
1370 
/* Syscall verification handler: oops the caller unless dev is a flash
 * device bound to this driver, then forward to the implementation.
 */
void z_vrfy_nrf_qspi_nor_xip_enable(const struct device *dev, bool enable)
{
	K_OOPS(K_SYSCALL_SPECIFIC_DRIVER(dev, K_OBJ_DRIVER_FLASH,
					 &qspi_nor_api));

	z_impl_nrf_qspi_nor_xip_enable(dev, enable);
}
1378 
1379 #include <zephyr/syscalls/nrf_qspi_nor_xip_enable_mrsh.c>
1380 #endif /* CONFIG_USERSPACE */
1381 
/* Instance data: access semaphore starts free (count 1), transfer-done
 * semaphore starts empty (count 0). usage_count/xip_enabled are
 * zero-initialized.
 */
static struct qspi_nor_data qspi_nor_dev_data = {
#ifdef CONFIG_MULTITHREADING
	.sem = Z_SEM_INITIALIZER(qspi_nor_dev_data.sem, 1, 1),
	.sync = Z_SEM_INITIALIZER(qspi_nor_dev_data.sync, 0, 1),
#endif /* CONFIG_MULTITHREADING */
};
1388 
/* Build-time check that the QSPI node provides a sleep pinctrl state
 * (needed by the suspend path above).
 */
NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(QSPI_NODE);

PINCTRL_DT_DEFINE(QSPI_NODE);
1392 
/* Instance configuration, resolved from devicetree at build time.
 * GPIO/PSEL setup in nrfx_cfg is skipped because pins are managed
 * through pinctrl (pcfg).
 */
static const struct qspi_nor_config qspi_nor_dev_config = {
	.nrfx_cfg.skip_gpio_cfg = true,
	.nrfx_cfg.skip_psel_cfg = true,
	.pcfg = PINCTRL_DT_DEV_CONFIG_GET(QSPI_NODE),
	.nrfx_cfg.prot_if = {
		/* Read opcode from the "readoc" DT property when present,
		 * FASTREAD otherwise.
		 */
		.readoc = COND_CODE_1(DT_INST_NODE_HAS_PROP(0, readoc),
			(_CONCAT(NRF_QSPI_READOC_,
				 DT_STRING_UPPER_TOKEN(DT_DRV_INST(0),
						       readoc))),
			(NRF_QSPI_READOC_FASTREAD)),
		/* Write opcode from the "writeoc" DT property when present,
		 * page program (PP) otherwise.
		 */
		.writeoc = COND_CODE_1(DT_INST_NODE_HAS_PROP(0, writeoc),
			(_CONCAT(NRF_QSPI_WRITEOC_,
				 DT_STRING_UPPER_TOKEN(DT_DRV_INST(0),
						       writeoc))),
			(NRF_QSPI_WRITEOC_PP)),
		/* 32-bit addressing only when "address-size-32" is set. */
		.addrmode = DT_INST_PROP(0, address_size_32)
			    ? NRF_QSPI_ADDRMODE_32BIT
			    : NRF_QSPI_ADDRMODE_24BIT,
	},
	.nrfx_cfg.phy_if = {
		.sck_freq = INST_0_SCK_CFG,
		.sck_delay = DT_INST_PROP(0, sck_delay),
		.spi_mode = INST_0_SPI_MODE,
	},
	.nrfx_cfg.timeout = CONFIG_NORDIC_QSPI_NOR_TIMEOUT_MS,

	.size = INST_0_BYTES,
	.id = DT_INST_PROP(0, jedec_id),
};
1422 
/* Register the PM hooks and the device instance itself. */
PM_DEVICE_DT_INST_DEFINE(0, qspi_nor_pm_action);

DEVICE_DT_INST_DEFINE(0, qspi_nor_init, PM_DEVICE_DT_INST_GET(0),
		      &qspi_nor_dev_data, &qspi_nor_dev_config,
		      POST_KERNEL, CONFIG_NORDIC_QSPI_NOR_INIT_PRIORITY,
		      &qspi_nor_api);
1429