/*
 * Copyright (c) 2016 - 2025, Nordic Semiconductor ASA
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived from this
 *    software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

34 #include <nrfx.h>
35
36 #if NRFX_CHECK(NRFX_QSPI_ENABLED)
37
38 #include <nrfx_qspi.h>
39 #include <hal/nrf_clock.h>
40 #include <hal/nrf_gpio.h>
41 #include <nrf_erratas.h>
42
43 #define NRFX_LOG_MODULE QSPI
44 #include <nrfx_log.h>
45
46 /** @brief Command byte used to read status register. */
47 #define QSPI_STD_CMD_RDSR 0x05
48
49 /** @brief Byte used to mask status register and retrieve the write-in-progess bit. */
50 #define QSPI_MEM_STATUSREG_WIP_MASK 0x01
51
52 /** @brief Default time used in timeout function. */
53 #define QSPI_DEF_WAIT_TIME_US 10
54
55 /**
56 * @brief Default number of tries in timeout function.
57 *
58 * When the flash memory is busy with some operation, waiting for the READY
59 * event even when only the ACTIVATE task is triggered may take significant
60 * amount of time. The below default number of attempts gives the maximum
61 * waiting time of 500 ms what should cover most cases, including erasing
62 * of sectors in most flash chips.
63 */
64 #define QSPI_DEF_WAIT_ATTEMPTS 50000
65
66 /**
67 * @brief Macro for initializing a QSPI pin.
68 *
69 * QSPI peripheral expects high drive pin strength.
70 */
71 #define QSPI_PIN_INIT(_pin) nrf_gpio_cfg((_pin), \
72 NRF_GPIO_PIN_DIR_INPUT, \
73 NRF_GPIO_PIN_INPUT_DISCONNECT, \
74 NRF_GPIO_PIN_NOPULL, \
75 NRF_GPIO_PIN_H0H1, \
76 NRF_GPIO_PIN_NOSENSE)
77
78 #if !defined(USE_WORKAROUND_FOR_ANOMALY_121) && defined(NRF53_SERIES)
79 // ANOMALY 121 - Configuration of QSPI peripheral requires additional steps.
80 #define USE_WORKAROUND_FOR_ANOMALY_121 1
81 #endif
82
83 /** @brief QSPI driver states.*/
/** @brief QSPI driver states. */
typedef enum
{
    NRFX_QSPI_STATE_UNINITIALIZED = 0, ///< Driver is not initialized.
    NRFX_QSPI_STATE_IDLE,              ///< No operation is in progress.
    NRFX_QSPI_STATE_WRITE,             ///< Write transfer is in progress.
    NRFX_QSPI_STATE_READ,              ///< Read transfer is in progress.
    NRFX_QSPI_STATE_ERASE,             ///< Erase operation is in progress.
    NRFX_QSPI_STATE_CINSTR,            ///< Long-frame custom instruction is in progress.
} nrfx_qspi_state_t;
93
94 /** @brief Control block - driver instance local data. */
95 typedef struct
96 {
97 nrfx_qspi_handler_t handler; /**< Handler. */
98 void * p_context; /**< Driver context used in interrupt. */
99 void * p_buffer_primary; /**< Pointer to the primary buffer. */
100 void * p_buffer_secondary; /**< Pointer to the secondary buffer. */
101 uint32_t size_primary; /**< Size of the primary buffer. */
102 uint32_t size_secondary; /**< Size of the secondary buffer. */
103 uint32_t addr_primary; /**< Address for the primary buffer. */
104 uint32_t addr_secondary; /**< Address for the secondary buffer. */
105 nrfx_qspi_evt_ext_t evt_ext; /**< Extended event. */
106 nrfx_qspi_state_t state; /**< Driver state. */
107 uint32_t timeout; /**< Time in milliseconds used for operation timeout. */
108 bool volatile activated; /**< Flag indicating whether the QSPI is active. */
109 bool volatile timeout_signal; /**< Flag indicating a timeout of an operation.
110 * The flag is used to trigger premature timeout
111 * if @ref nrfx_qspi_timeout_signal is used. */
112 bool skip_gpio_cfg; /**< Do not touch GPIO configuration of used pins. */
113 } qspi_control_block_t;
114
115 static qspi_control_block_t m_cb;
116
117 static nrfx_err_t qspi_activate(bool wait);
118 static nrfx_err_t qspi_ready_wait(void);
119 static void qspi_workaround_215_43_apply(void);
120 static bool qspi_errata_159_conditions_check(void);
121
qspi_xfer(void * p_buffer,size_t length,uint32_t address,nrfx_qspi_state_t desired_state)122 static nrfx_err_t qspi_xfer(void * p_buffer,
123 size_t length,
124 uint32_t address,
125 nrfx_qspi_state_t desired_state)
126 {
127 NRFX_ASSERT(m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);
128 NRFX_ASSERT(p_buffer != NULL);
129
130 if (qspi_errata_159_conditions_check())
131 {
132 return NRFX_ERROR_FORBIDDEN;
133 }
134
135 if (!nrfx_is_in_ram(p_buffer) || !nrfx_is_word_aligned(p_buffer))
136 {
137 return NRFX_ERROR_INVALID_ADDR;
138 }
139
140 if (m_cb.state != NRFX_QSPI_STATE_IDLE &&
141 (m_cb.state != desired_state || !m_cb.activated))
142 {
143 return NRFX_ERROR_BUSY;
144 }
145
146 nrf_qspi_task_t task;
147 if (desired_state == NRFX_QSPI_STATE_WRITE)
148 {
149 nrf_qspi_write_buffer_set(NRF_QSPI, p_buffer, length, address);
150 task = NRF_QSPI_TASK_WRITESTART;
151 }
152 else
153 {
154 nrf_qspi_read_buffer_set(NRF_QSPI, p_buffer, length, address);
155 task = NRF_QSPI_TASK_READSTART;
156 }
157
158 m_cb.timeout_signal = false;
159
160 if (!m_cb.handler)
161 {
162 if (!m_cb.activated && qspi_activate(true) == NRFX_ERROR_TIMEOUT)
163 {
164 return NRFX_ERROR_TIMEOUT;
165 }
166
167 nrf_qspi_event_clear(NRF_QSPI, NRF_QSPI_EVENT_READY);
168 nrf_qspi_task_trigger(NRF_QSPI, task);
169
170 return qspi_ready_wait();
171 }
172
173 if (m_cb.p_buffer_primary)
174 {
175 m_cb.p_buffer_secondary = p_buffer;
176 m_cb.size_secondary = length;
177 m_cb.addr_secondary = address;
178 }
179 else
180 {
181 m_cb.p_buffer_primary = p_buffer;
182 m_cb.size_primary = length;
183 m_cb.addr_primary = address;
184
185 m_cb.state = desired_state;
186 nrf_qspi_event_clear(NRF_QSPI, NRF_QSPI_EVENT_READY);
187 nrf_qspi_int_enable(NRF_QSPI, NRF_QSPI_INT_READY_MASK);
188 if (!m_cb.activated)
189 {
190 (void)qspi_activate(false);
191 }
192 else
193 {
194 nrf_qspi_task_trigger(NRF_QSPI, task);
195 }
196 }
197
198 return NRFX_SUCCESS;
199 }
200
/**
 * @brief Validate and apply the pin configuration.
 *
 * @retval true  Pins were valid and configured (or configuration was skipped).
 * @retval false Required pins were not specified or, on nRF5340, pins other
 *               than the dedicated QSPI pins were requested.
 */
static bool qspi_pins_configure(nrfx_qspi_config_t const * p_config)
{
    // If both GPIO configuration and pin selection are to be skipped,
    // the pin numbers may be not specified at all, so even validation
    // of those numbers cannot be performed.
    if (p_config->skip_gpio_cfg && p_config->skip_psel_cfg)
    {
        return true;
    }

    // Check if the user set meaningful values to struct fields. If not, return false.
    if ((p_config->pins.sck_pin == NRF_QSPI_PIN_NOT_CONNECTED) ||
        (p_config->pins.csn_pin == NRF_QSPI_PIN_NOT_CONNECTED) ||
        (p_config->pins.io0_pin == NRF_QSPI_PIN_NOT_CONNECTED) ||
        (p_config->pins.io1_pin == NRF_QSPI_PIN_NOT_CONNECTED))
    {
        return false;
    }

#if defined(NRF5340_XXAA)
    // Check if dedicated QSPI pins are used.
    enum {
        QSPI_IO0_DEDICATED = NRF_GPIO_PIN_MAP(0, 13),
        QSPI_IO1_DEDICATED = NRF_GPIO_PIN_MAP(0, 14),
        QSPI_IO2_DEDICATED = NRF_GPIO_PIN_MAP(0, 15),
        QSPI_IO3_DEDICATED = NRF_GPIO_PIN_MAP(0, 16),
        QSPI_SCK_DEDICATED = NRF_GPIO_PIN_MAP(0, 17),
        QSPI_CSN_DEDICATED = NRF_GPIO_PIN_MAP(0, 18)
    };

    if ((p_config->pins.sck_pin != QSPI_SCK_DEDICATED) ||
        (p_config->pins.csn_pin != QSPI_CSN_DEDICATED) ||
        (p_config->pins.io0_pin != QSPI_IO0_DEDICATED) ||
        (p_config->pins.io1_pin != QSPI_IO1_DEDICATED) ||
        (p_config->pins.io2_pin != NRF_QSPI_PIN_NOT_CONNECTED &&
         p_config->pins.io2_pin != QSPI_IO2_DEDICATED) ||
        (p_config->pins.io3_pin != NRF_QSPI_PIN_NOT_CONNECTED &&
         p_config->pins.io3_pin != QSPI_IO3_DEDICATED))
    {
        return false;
    }
#endif

    if (!p_config->skip_gpio_cfg)
    {
        QSPI_PIN_INIT(p_config->pins.sck_pin);
        QSPI_PIN_INIT(p_config->pins.csn_pin);
        QSPI_PIN_INIT(p_config->pins.io0_pin);
        QSPI_PIN_INIT(p_config->pins.io1_pin);
        if (p_config->pins.io2_pin != NRF_QSPI_PIN_NOT_CONNECTED)
        {
            QSPI_PIN_INIT(p_config->pins.io2_pin);
        }
        if (p_config->pins.io3_pin != NRF_QSPI_PIN_NOT_CONNECTED)
        {
            QSPI_PIN_INIT(p_config->pins.io3_pin);
        }
    }

    if (!p_config->skip_psel_cfg)
    {
        nrf_qspi_pins_set(NRF_QSPI, &p_config->pins);
    }

    return true;
}
267
qspi_pins_deconfigure(void)268 static void qspi_pins_deconfigure(void)
269 {
270 nrf_qspi_pins_t pins;
271 nrf_qspi_pins_get(NRF_QSPI, &pins);
272
273 nrf_gpio_cfg_default(pins.sck_pin);
274 nrf_gpio_cfg_default(pins.csn_pin);
275 nrf_gpio_cfg_default(pins.io0_pin);
276 nrf_gpio_cfg_default(pins.io1_pin);
277 if (pins.io2_pin != NRF_QSPI_PIN_NOT_CONNECTED)
278 {
279 nrf_gpio_cfg_default(pins.io2_pin);
280 }
281 if (pins.io3_pin != NRF_QSPI_PIN_NOT_CONNECTED)
282 {
283 nrf_gpio_cfg_default(pins.io3_pin);
284 }
285 }
286
qspi_ready_wait(void)287 static nrfx_err_t qspi_ready_wait(void)
288 {
289 bool result;
290 uint32_t attempts = m_cb.timeout > 0 ?
291 (m_cb.timeout * 1000UL) / QSPI_DEF_WAIT_TIME_US : QSPI_DEF_WAIT_ATTEMPTS;
292
293 NRFX_WAIT_FOR(nrf_qspi_event_check(NRF_QSPI, NRF_QSPI_EVENT_READY) || m_cb.timeout_signal,
294 attempts,
295 QSPI_DEF_WAIT_TIME_US,
296 result);
297
298 if (!result || m_cb.timeout_signal)
299 {
300 return NRFX_ERROR_TIMEOUT;
301 }
302
303 return NRFX_SUCCESS;
304 }
305
qspi_configure(nrfx_qspi_config_t const * p_config)306 static nrfx_err_t qspi_configure(nrfx_qspi_config_t const * p_config)
307 {
308 if (!qspi_pins_configure(p_config))
309 {
310 return NRFX_ERROR_INVALID_PARAM;
311 }
312
313 m_cb.timeout = p_config->timeout;
314 m_cb.skip_gpio_cfg = p_config->skip_gpio_cfg;
315
316 /* The code below accesses the IFTIMING and IFCONFIG1 registers what
317 * may trigger anomaly 215 on nRF52840 or anomaly 43 on nRF5340. Use
318 * the proper workaround then.
319 */
320 if (NRF52_ERRATA_215_ENABLE_WORKAROUND || NRF53_ERRATA_43_ENABLE_WORKAROUND)
321 {
322 /* The interrupt is disabled because of the anomaly handling.
323 * It will be reenabled if needed before the next QSPI operation.
324 */
325 nrf_qspi_int_disable(NRF_QSPI, NRF_QSPI_INT_READY_MASK);
326 qspi_workaround_215_43_apply();
327 }
328
329 nrf_qspi_xip_offset_set(NRF_QSPI, p_config->xip_offset);
330
331 nrf_qspi_ifconfig0_set(NRF_QSPI, &p_config->prot_if);
332 #if NRFX_CHECK(USE_WORKAROUND_FOR_ANOMALY_121)
333 uint32_t regval = nrf_qspi_ifconfig0_raw_get(NRF_QSPI);
334 if (p_config->phy_if.sck_freq == NRF_QSPI_FREQ_DIV1)
335 {
336 regval |= ((1UL << 16) | (1UL << 17));
337 }
338 else
339 {
340 regval &= ~(1UL << 17);
341 regval |= (1UL << 16);
342 }
343 nrf_qspi_ifconfig0_raw_set(NRF_QSPI, regval);
344 nrf_qspi_iftiming_set(NRF_QSPI, 6);
345 #endif
346 nrf_qspi_ifconfig1_set(NRF_QSPI, &p_config->phy_if);
347
348 if (m_cb.handler)
349 {
350 NRFX_IRQ_PRIORITY_SET(QSPI_IRQn, p_config->irq_priority);
351 NRFX_IRQ_ENABLE(QSPI_IRQn);
352 }
353
354 return NRFX_SUCCESS;
355 }
356
qspi_activate(bool wait)357 static nrfx_err_t qspi_activate(bool wait)
358 {
359 nrf_qspi_enable(NRF_QSPI);
360
361 nrf_qspi_event_clear(NRF_QSPI, NRF_QSPI_EVENT_READY);
362 nrf_qspi_task_trigger(NRF_QSPI, NRF_QSPI_TASK_ACTIVATE);
363
364 if (wait)
365 {
366 nrfx_err_t ret = qspi_ready_wait();
367
368 if (ret == NRFX_SUCCESS)
369 {
370 m_cb.activated = true;
371 }
372 return ret;
373 }
374
375 return NRFX_SUCCESS;
376 }
377
qspi_deactivate(void)378 static void qspi_deactivate(void)
379 {
380 m_cb.activated = false;
381
382 nrf_qspi_int_disable(NRF_QSPI, NRF_QSPI_INT_READY_MASK);
383
384 nrf_qspi_task_trigger(NRF_QSPI, NRF_QSPI_TASK_DEACTIVATE);
385
386 nrf_qspi_disable(NRF_QSPI);
387
388 nrf_qspi_event_clear(NRF_QSPI, NRF_QSPI_EVENT_READY);
389 }
390
/**
 * @brief Check whether nRF5340 anomaly 159 conditions are present.
 *
 * QSPI operations must not be started when the 192 MHz clock divider is not 1
 * or the HFCLK divider is not 2; callers return NRFX_ERROR_FORBIDDEN then.
 * On devices without the errata this always returns false.
 */
static bool qspi_errata_159_conditions_check(void)
{
#if NRF_CLOCK_HAS_HFCLK192M && NRF53_ERRATA_159_ENABLE_WORKAROUND
    if ((nrf_clock_hfclk192m_div_get(NRF_CLOCK) != NRF_CLOCK_HFCLK_DIV_1) ||
        (nrf_clock_hfclk_div_get(NRF_CLOCK) != NRF_CLOCK_HFCLK_DIV_2))
    {
        return true;
    }
    else
#endif
    {
        return false;
    }
}
405
qspi_workaround_215_43_apply(void)406 static void qspi_workaround_215_43_apply(void)
407 {
408 nrf_qspi_pins_t pins;
409 nrf_qspi_pins_t disconnected_pins = {
410 .sck_pin = NRF_QSPI_PIN_NOT_CONNECTED,
411 .csn_pin = NRF_QSPI_PIN_NOT_CONNECTED,
412 .io0_pin = NRF_QSPI_PIN_NOT_CONNECTED,
413 .io1_pin = NRF_QSPI_PIN_NOT_CONNECTED,
414 .io2_pin = NRF_QSPI_PIN_NOT_CONNECTED,
415 .io3_pin = NRF_QSPI_PIN_NOT_CONNECTED,
416 };
417
418 /* Disconnect pins to not wait for response from external memory. */
419 nrf_qspi_pins_get(NRF_QSPI, &pins);
420 nrf_qspi_pins_set(NRF_QSPI, &disconnected_pins);
421
422 nrf_qspi_event_clear(NRF_QSPI, NRF_QSPI_EVENT_READY);
423 nrf_qspi_task_trigger(NRF_QSPI, NRF_QSPI_TASK_ACTIVATE);
424
425 while (!nrf_qspi_event_check(NRF_QSPI, NRF_QSPI_EVENT_READY))
426 {}
427
428 /* Restore previous pins. */
429 nrf_qspi_pins_set(NRF_QSPI, &pins);
430 }
431
nrfx_qspi_init(nrfx_qspi_config_t const * p_config,nrfx_qspi_handler_t handler,void * p_context)432 nrfx_err_t nrfx_qspi_init(nrfx_qspi_config_t const * p_config,
433 nrfx_qspi_handler_t handler,
434 void * p_context)
435 {
436 nrfx_err_t err_code;
437
438 NRFX_ASSERT(p_config);
439
440 if (m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED)
441 {
442 #if NRFX_API_VER_AT_LEAST(3, 2, 0)
443 err_code = NRFX_ERROR_ALREADY;
444 #else
445 err_code = NRFX_ERROR_INVALID_STATE;
446 #endif
447 NRFX_LOG_WARNING("Function: %s, error code: %s.",
448 __func__,
449 NRFX_LOG_ERROR_STRING_GET(err_code));
450 return err_code;
451 }
452
453 m_cb.handler = handler;
454 m_cb.p_context = p_context;
455
456 if (p_config)
457 {
458 err_code = qspi_configure(p_config);
459 if (err_code != NRFX_SUCCESS)
460 {
461 NRFX_LOG_WARNING("Function: %s, error code: %s.",
462 __func__,
463 NRFX_LOG_ERROR_STRING_GET(err_code));
464 return err_code;
465 }
466 }
467
468 m_cb.p_buffer_primary = NULL;
469 m_cb.p_buffer_secondary = NULL;
470
471 m_cb.state = NRFX_QSPI_STATE_IDLE;
472 m_cb.activated = false;
473
474 return NRFX_SUCCESS;
475 }
476
nrfx_qspi_reconfigure(nrfx_qspi_config_t const * p_config)477 nrfx_err_t nrfx_qspi_reconfigure(nrfx_qspi_config_t const * p_config)
478 {
479 NRFX_ASSERT(p_config);
480 nrfx_err_t err_code = NRFX_SUCCESS;
481
482 if (m_cb.state == NRFX_QSPI_STATE_UNINITIALIZED)
483 {
484 return NRFX_ERROR_INVALID_STATE;
485 }
486
487 if (m_cb.state != NRFX_QSPI_STATE_IDLE)
488 {
489 return NRFX_ERROR_BUSY;
490 }
491
492 if (!m_cb.activated)
493 {
494 err_code = qspi_configure(p_config);
495 }
496 else
497 {
498 qspi_deactivate();
499 err_code = qspi_configure(p_config);
500 }
501
502 return err_code;
503 }
504
nrfx_qspi_timeout_signal(void)505 void nrfx_qspi_timeout_signal(void)
506 {
507 NRFX_ASSERT(m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);
508
509 m_cb.timeout_signal = true;
510 }
511
nrfx_qspi_cinstr_xfer(nrf_qspi_cinstr_conf_t const * p_config,void const * p_tx_buffer,void * p_rx_buffer)512 nrfx_err_t nrfx_qspi_cinstr_xfer(nrf_qspi_cinstr_conf_t const * p_config,
513 void const * p_tx_buffer,
514 void * p_rx_buffer)
515 {
516 NRFX_ASSERT(m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);
517
518 if (qspi_errata_159_conditions_check())
519 {
520 return NRFX_ERROR_FORBIDDEN;
521 }
522
523 if (m_cb.state != NRFX_QSPI_STATE_IDLE)
524 {
525 return NRFX_ERROR_BUSY;
526 }
527
528 if (!m_cb.activated && qspi_activate(true) == NRFX_ERROR_TIMEOUT)
529 {
530 return NRFX_ERROR_TIMEOUT;
531 }
532
533 /* For custom instruction transfer driver has to switch to blocking mode.
534 * If driver was previously configured to non-blocking mode, interrupts
535 * will get reenabled before next standard transfer.
536 */
537 nrf_qspi_int_disable(NRF_QSPI, NRF_QSPI_INT_READY_MASK);
538
539 /* The code below accesses the CINSTRCONF register what may trigger
540 * anomaly 215 on nRF52840 or anomaly 43 on nRF5340. Use the proper
541 * workaround then.
542 */
543 if (NRF52_ERRATA_215_ENABLE_WORKAROUND || NRF53_ERRATA_43_ENABLE_WORKAROUND)
544 {
545 qspi_workaround_215_43_apply();
546 }
547
548 /* In some cases, only opcode should be sent. To prevent execution, set function code is
549 * surrounded by an if.
550 */
551 if (p_tx_buffer)
552 {
553 nrf_qspi_cinstrdata_set(NRF_QSPI, p_config->length, p_tx_buffer);
554 }
555
556 m_cb.timeout_signal = false;
557
558 nrf_qspi_event_clear(NRF_QSPI, NRF_QSPI_EVENT_READY);
559 nrf_qspi_cinstr_transfer_start(NRF_QSPI, p_config);
560
561 if (qspi_ready_wait() == NRFX_ERROR_TIMEOUT)
562 {
563 // This timeout should never occur when WIPWAIT is not active, since in this
564 // case the QSPI peripheral should send the command immediately, without any
565 // waiting for previous write to complete.
566 NRFX_ASSERT(p_config->wipwait);
567
568 return NRFX_ERROR_TIMEOUT;
569 }
570 nrf_qspi_event_clear(NRF_QSPI, NRF_QSPI_EVENT_READY);
571
572 if (p_rx_buffer)
573 {
574 nrf_qspi_cinstrdata_get(NRF_QSPI, p_config->length, p_rx_buffer);
575 }
576
577 return NRFX_SUCCESS;
578 }
579
nrfx_qspi_cinstr_quick_send(uint8_t opcode,nrf_qspi_cinstr_len_t length,void const * p_tx_buffer)580 nrfx_err_t nrfx_qspi_cinstr_quick_send(uint8_t opcode,
581 nrf_qspi_cinstr_len_t length,
582 void const * p_tx_buffer)
583 {
584 NRFX_ASSERT(m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);
585 NRFX_ASSERT(p_tx_buffer);
586
587 nrf_qspi_cinstr_conf_t config = NRFX_QSPI_DEFAULT_CINSTR(opcode, length);
588 return nrfx_qspi_cinstr_xfer(&config, p_tx_buffer, NULL);
589 }
590
nrfx_qspi_lfm_start(nrf_qspi_cinstr_conf_t const * p_config)591 nrfx_err_t nrfx_qspi_lfm_start(nrf_qspi_cinstr_conf_t const * p_config)
592 {
593 NRFX_ASSERT(m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);
594 NRFX_ASSERT(p_config->length == NRF_QSPI_CINSTR_LEN_1B);
595
596 if (qspi_errata_159_conditions_check())
597 {
598 return NRFX_ERROR_FORBIDDEN;
599 }
600
601 if (m_cb.state != NRFX_QSPI_STATE_IDLE)
602 {
603 return NRFX_ERROR_BUSY;
604 }
605
606 if (!m_cb.activated && qspi_activate(true) == NRFX_ERROR_TIMEOUT)
607 {
608 return NRFX_ERROR_TIMEOUT;
609 }
610
611 /* For transferring arbitrary byte length custom instructions driver has to switch to
612 * blocking mode. If driver was previously configured to non-blocking mode, interrupts
613 * will get reenabled before next standard transfer.
614 */
615 nrf_qspi_int_disable(NRF_QSPI, NRF_QSPI_INT_READY_MASK);
616
617 /* The code below accesses the CINSTRCONF register what may trigger
618 * anomaly 215 on nRF52840 or anomaly 43 on nRF5340. Use the proper
619 * workaround then.
620 */
621 if (NRF52_ERRATA_215_ENABLE_WORKAROUND || NRF53_ERRATA_43_ENABLE_WORKAROUND)
622 {
623 qspi_workaround_215_43_apply();
624 }
625
626 NRFX_ASSERT(!(nrf_qspi_cinstr_long_transfer_is_ongoing(NRF_QSPI)));
627
628 m_cb.timeout_signal = false;
629
630 nrf_qspi_event_clear(NRF_QSPI, NRF_QSPI_EVENT_READY);
631 nrf_qspi_cinstr_long_transfer_start(NRF_QSPI, p_config);
632
633 if (qspi_ready_wait() == NRFX_ERROR_TIMEOUT)
634 {
635 /* In case of error, abort long frame mode */
636 nrf_qspi_cinstr_long_transfer_continue(NRF_QSPI, NRF_QSPI_CINSTR_LEN_1B, true);
637 return NRFX_ERROR_TIMEOUT;
638 }
639
640 m_cb.state = NRFX_QSPI_STATE_CINSTR;
641 return NRFX_SUCCESS;
642 }
643
nrfx_qspi_lfm_xfer(void const * p_tx_buffer,void * p_rx_buffer,size_t transfer_length,bool finalize)644 nrfx_err_t nrfx_qspi_lfm_xfer(void const * p_tx_buffer,
645 void * p_rx_buffer,
646 size_t transfer_length,
647 bool finalize)
648 {
649 NRFX_ASSERT(m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);
650 NRFX_ASSERT(nrf_qspi_cinstr_long_transfer_is_ongoing(NRF_QSPI));
651
652 if (qspi_errata_159_conditions_check())
653 {
654 return NRFX_ERROR_FORBIDDEN;
655 }
656
657 nrfx_err_t status = NRFX_SUCCESS;
658
659 /* Perform transfers in packets of 8 bytes. Last transfer may be shorter. */
660 nrf_qspi_cinstr_len_t length = NRF_QSPI_CINSTR_LEN_9B;
661 for (uint32_t curr_byte = 0; curr_byte < transfer_length; curr_byte += 8)
662 {
663 uint32_t remaining_bytes = transfer_length - curr_byte;
664 m_cb.timeout_signal = false;
665
666 if (remaining_bytes < 8)
667 {
668 length = (nrf_qspi_cinstr_len_t)(remaining_bytes + 1);
669 }
670
671 if (p_tx_buffer)
672 {
673 nrf_qspi_cinstrdata_set(NRF_QSPI,
674 length,
675 &((uint8_t const *)p_tx_buffer)[curr_byte]);
676 }
677
678 nrf_qspi_event_clear(NRF_QSPI, NRF_QSPI_EVENT_READY);
679
680 if (remaining_bytes <= 8)
681 {
682 nrf_qspi_cinstr_long_transfer_continue(NRF_QSPI, length, finalize);
683 }
684 else
685 {
686 nrf_qspi_cinstr_long_transfer_continue(NRF_QSPI, length, false);
687 }
688
689 if (qspi_ready_wait() == NRFX_ERROR_TIMEOUT)
690 {
691 /* In case of error, abort long frame mode */
692 nrf_qspi_cinstr_long_transfer_continue(NRF_QSPI, NRF_QSPI_CINSTR_LEN_1B, true);
693 status = NRFX_ERROR_TIMEOUT;
694 break;
695 }
696
697 if (p_rx_buffer)
698 {
699 nrf_qspi_cinstrdata_get(NRF_QSPI,
700 length,
701 &((uint8_t *)p_rx_buffer)[curr_byte]);
702 }
703 }
704 nrf_qspi_event_clear(NRF_QSPI, NRF_QSPI_EVENT_READY);
705
706 if ((finalize) || (status == NRFX_ERROR_TIMEOUT))
707 {
708 m_cb.state = NRFX_QSPI_STATE_IDLE;
709 }
710
711 return status;
712 }
713
nrfx_qspi_mem_busy_check(void)714 nrfx_err_t nrfx_qspi_mem_busy_check(void)
715 {
716 NRFX_ASSERT(m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);
717
718 nrfx_err_t ret_code;
719 uint8_t status_value = 0;
720
721 nrf_qspi_cinstr_conf_t const config = {
722 .opcode = QSPI_STD_CMD_RDSR,
723 .length = NRF_QSPI_CINSTR_LEN_2B,
724 // Keep the IO3 line high during the transfer. Otherwise, its low level
725 // can be interpreted by the memory chip as an active HOLD#/RESET#
726 // signal and the status register value may not be output.
727 // Such configuration is also consistent with what the QSPI peripheral
728 // uses when it sends the Read Status Register command itself.
729 .io3_level = true,
730 };
731 ret_code = nrfx_qspi_cinstr_xfer(&config, &status_value, &status_value);
732
733 if (ret_code != NRFX_SUCCESS)
734 {
735 return ret_code;
736 }
737
738 if ((status_value & QSPI_MEM_STATUSREG_WIP_MASK) != 0x00)
739 {
740 return NRFX_ERROR_BUSY;
741 }
742
743 return NRFX_SUCCESS;
744 }
745
nrfx_qspi_uninit(void)746 void nrfx_qspi_uninit(void)
747 {
748 NRFX_ASSERT(m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);
749
750 NRFX_IRQ_DISABLE(QSPI_IRQn);
751
752 qspi_deactivate();
753 if (!m_cb.skip_gpio_cfg)
754 {
755 qspi_pins_deconfigure();
756 }
757
758 m_cb.state = NRFX_QSPI_STATE_UNINITIALIZED;
759 NRFX_LOG_INFO("Uninitialized.");
760 }
761
nrfx_qspi_activate(bool wait)762 nrfx_err_t nrfx_qspi_activate(bool wait)
763 {
764 NRFX_ASSERT(m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);
765
766 if (m_cb.activated)
767 {
768 return NRFX_ERROR_ALREADY;
769 }
770
771 return qspi_activate(wait);
772 }
773
nrfx_qspi_deactivate(void)774 nrfx_err_t nrfx_qspi_deactivate(void)
775 {
776 NRFX_ASSERT(m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);
777
778 if (m_cb.state != NRFX_QSPI_STATE_IDLE)
779 {
780 return NRFX_ERROR_BUSY;
781 }
782
783 qspi_deactivate();
784 return NRFX_SUCCESS;
785 }
786
nrfx_qspi_init_check(void)787 bool nrfx_qspi_init_check(void)
788 {
789 return (m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);
790 }
791
nrfx_qspi_write(void const * p_tx_buffer,size_t tx_buffer_length,uint32_t dst_address)792 nrfx_err_t nrfx_qspi_write(void const * p_tx_buffer,
793 size_t tx_buffer_length,
794 uint32_t dst_address)
795 {
796 #if defined(__GNUC__)
797 #pragma GCC diagnostic push
798 #pragma GCC diagnostic ignored "-Wcast-qual"
799 #endif
800 return qspi_xfer((void *)p_tx_buffer, tx_buffer_length, dst_address, NRFX_QSPI_STATE_WRITE);
801
802 #if defined(__GNUC__)
803 #pragma GCC diagnostic pop
804 #endif
805 }
806
nrfx_qspi_read(void * p_rx_buffer,size_t rx_buffer_length,uint32_t src_address)807 nrfx_err_t nrfx_qspi_read(void * p_rx_buffer,
808 size_t rx_buffer_length,
809 uint32_t src_address)
810 {
811 return qspi_xfer((void *)p_rx_buffer, rx_buffer_length, src_address, NRFX_QSPI_STATE_READ);
812 }
813
nrfx_qspi_erase(nrf_qspi_erase_len_t length,uint32_t start_address)814 nrfx_err_t nrfx_qspi_erase(nrf_qspi_erase_len_t length,
815 uint32_t start_address)
816 {
817 NRFX_ASSERT(m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);
818
819 if (qspi_errata_159_conditions_check())
820 {
821 return NRFX_ERROR_FORBIDDEN;
822 }
823
824 if (!nrfx_is_word_aligned((void const *)start_address))
825 {
826 return NRFX_ERROR_INVALID_ADDR;
827 }
828
829 if (m_cb.handler && m_cb.state != NRFX_QSPI_STATE_IDLE)
830 {
831 return NRFX_ERROR_BUSY;
832 }
833
834 nrf_qspi_erase_ptr_set(NRF_QSPI, start_address, length);
835 m_cb.timeout_signal = false;
836
837 if (!m_cb.handler)
838 {
839 if (!m_cb.activated && qspi_activate(true) == NRFX_ERROR_TIMEOUT)
840 {
841 return NRFX_ERROR_TIMEOUT;
842 }
843
844 nrf_qspi_event_clear(NRF_QSPI, NRF_QSPI_EVENT_READY);
845 nrf_qspi_task_trigger(NRF_QSPI, NRF_QSPI_TASK_ERASESTART);
846
847 return qspi_ready_wait();
848 }
849
850 m_cb.state = NRFX_QSPI_STATE_ERASE;
851 nrf_qspi_event_clear(NRF_QSPI, NRF_QSPI_EVENT_READY);
852 nrf_qspi_int_enable(NRF_QSPI, NRF_QSPI_INT_READY_MASK);
853
854 if (!m_cb.activated)
855 {
856 (void)qspi_activate(false);
857 }
858 else
859 {
860 nrf_qspi_task_trigger(NRF_QSPI, NRF_QSPI_TASK_ERASESTART);
861 }
862
863 return NRFX_SUCCESS;
864 }
865
nrfx_qspi_chip_erase(void)866 nrfx_err_t nrfx_qspi_chip_erase(void)
867 {
868 NRFX_ASSERT(m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);
869
870 return nrfx_qspi_erase(NRF_QSPI_ERASE_LEN_ALL, 0);
871 }
872
nrfx_qspi_event_extended_get(void)873 nrfx_qspi_evt_ext_t const * nrfx_qspi_event_extended_get(void)
874 {
875 NRFX_ASSERT(m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);
876 NRFX_ASSERT(m_cb.evt_ext.type != NRFX_QSPI_EVENT_NONE);
877 return &m_cb.evt_ext;
878 }
879
nrfx_qspi_xfer_buffered_check(void)880 bool nrfx_qspi_xfer_buffered_check(void)
881 {
882 NRFX_ASSERT(m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);
883
884 return (bool)m_cb.p_buffer_secondary;
885 }
886
#if NRF_QSPI_HAS_XIP_ENC
/** @brief Enable (with the given keys/nonce) or disable (NULL config) XIP encryption. */
nrfx_err_t nrfx_qspi_xip_encrypt(nrf_qspi_encryption_t const * p_config)
{
    NRFX_ASSERT(m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);

    if (m_cb.state != NRFX_QSPI_STATE_IDLE)
    {
        return NRFX_ERROR_BUSY;
    }

    if (p_config)
    {
        nrf_qspi_xip_encryption_configure(NRF_QSPI, p_config);
        nrf_qspi_xip_encryption_set(NRF_QSPI, true);
    }
    else
    {
        nrf_qspi_xip_encryption_set(NRF_QSPI, false);
    }

    return NRFX_SUCCESS;
}
#endif
910
#if NRF_QSPI_HAS_DMA_ENC
/** @brief Enable (with the given keys/nonce) or disable (NULL config) EasyDMA encryption. */
nrfx_err_t nrfx_qspi_dma_encrypt(nrf_qspi_encryption_t const * p_config)
{
    NRFX_ASSERT(m_cb.state != NRFX_QSPI_STATE_UNINITIALIZED);

    if (m_cb.state != NRFX_QSPI_STATE_IDLE)
    {
        return NRFX_ERROR_BUSY;
    }

    if (p_config)
    {
        nrf_qspi_dma_encryption_configure(NRF_QSPI, p_config);
        nrf_qspi_dma_encryption_set(NRF_QSPI, true);
    }
    else
    {
        nrf_qspi_dma_encryption_set(NRF_QSPI, false);
    }

    return NRFX_SUCCESS;
}
#endif
934
/**
 * @brief Fill the extended transfer event from the just-finished primary
 *        buffer, then promote the queued secondary buffer (if any) to primary.
 */
static void qspi_event_xfer_handle(nrfx_qspi_evt_ext_xfer_t * p_xfer)
{
    p_xfer->p_buffer = (uint8_t *)m_cb.p_buffer_primary;
    p_xfer->size     = m_cb.size_primary;
    p_xfer->addr     = m_cb.addr_primary;
    if (m_cb.p_buffer_secondary)
    {
        m_cb.p_buffer_primary = m_cb.p_buffer_secondary;
        m_cb.size_primary     = m_cb.size_secondary;
        m_cb.addr_primary     = m_cb.addr_secondary;

        m_cb.p_buffer_secondary = NULL;
    }
    else
    {
        m_cb.p_buffer_primary = NULL;
    }
}
953
/** @brief Fill the extended erase event from the peripheral's erase registers. */
static void qspi_event_erase_handle(nrfx_qspi_evt_ext_erase_t * p_erase)
{
    p_erase->addr = nrf_qspi_erase_ptr_get(NRF_QSPI);
    p_erase->len  = nrf_qspi_erase_len_get(NRF_QSPI);
}
959
/** @brief Build the extended event corresponding to the operation that just finished. */
static void qspi_extended_event_process(nrfx_qspi_evt_ext_t * p_event)
{
    switch (m_cb.state)
    {
        case NRFX_QSPI_STATE_WRITE:
            p_event->type = NRFX_QSPI_EVENT_WRITE_DONE;
            qspi_event_xfer_handle(&p_event->data.xfer);
            break;

        case NRFX_QSPI_STATE_READ:
            p_event->type = NRFX_QSPI_EVENT_READ_DONE;
            qspi_event_xfer_handle(&p_event->data.xfer);
            break;

        case NRFX_QSPI_STATE_ERASE:
            p_event->type = NRFX_QSPI_EVENT_ERASE_DONE;
            qspi_event_erase_handle(&p_event->data.erase);
            break;

        default:
            // No extended event for other states.
            break;
    }
}
983
qspi_activate_event_process(void)984 static void qspi_activate_event_process(void)
985 {
986 switch (m_cb.state)
987 {
988 case NRFX_QSPI_STATE_WRITE:
989 nrf_qspi_task_trigger(NRF_QSPI, NRF_QSPI_TASK_WRITESTART);
990 break;
991
992 case NRFX_QSPI_STATE_READ:
993 nrf_qspi_task_trigger(NRF_QSPI, NRF_QSPI_TASK_READSTART);
994 break;
995
996 case NRFX_QSPI_STATE_ERASE:
997 nrf_qspi_task_trigger(NRF_QSPI, NRF_QSPI_TASK_ERASESTART);
998 break;
999
1000 default:
1001 break;
1002 }
1003 }
1004
nrfx_qspi_irq_handler(void)1005 void nrfx_qspi_irq_handler(void)
1006 {
1007 // Catch Event ready interrupts
1008 if (nrf_qspi_event_check(NRF_QSPI, NRF_QSPI_EVENT_READY))
1009 {
1010 nrf_qspi_event_clear(NRF_QSPI, NRF_QSPI_EVENT_READY);
1011
1012 if (!m_cb.activated)
1013 {
1014 m_cb.activated = true;
1015 qspi_activate_event_process();
1016 return;
1017 }
1018
1019 qspi_extended_event_process(&m_cb.evt_ext);
1020 if (!m_cb.p_buffer_primary)
1021 {
1022 m_cb.state = NRFX_QSPI_STATE_IDLE;
1023 }
1024
1025 if (!m_cb.timeout_signal)
1026 {
1027 m_cb.handler(NRFX_QSPI_EVENT_DONE, m_cb.p_context);
1028 }
1029
1030 m_cb.evt_ext.type = NRFX_QSPI_EVENT_NONE;
1031 }
1032 }
1033
1034 #endif // NRFX_CHECK(NRFX_QSPI_ENABLED)
1035