1 /*
2 * Copyright (c) 2024 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 /**
8 * @brief File containing QSPI device interface specific definitions for the
9 * Zephyr OS layer of the Wi-Fi driver.
10 */
11
12 #include <errno.h>
13 #include <string.h>
14
15 #include <zephyr/init.h>
16 #include <zephyr/logging/log.h>
17 #include <zephyr/drivers/pinctrl.h>
18 #include <zephyr/drivers/wifi/nrf_wifi/bus/qspi_if.h>
19
20 #include <soc.h>
21 #include <nrf_erratas.h>
22 #include <nrfx_qspi.h>
23 #include <hal/nrf_clock.h>
24 #include <hal/nrf_gpio.h>
25
26 #include "spi_nor.h"
27
28 /* The QSPI bus node which the NRF70 is on */
29 #define QSPI_IF_BUS_NODE DT_NODELABEL(qspi)
30
31 /* QSPI bus properties from the devicetree */
32 #define QSPI_IF_BUS_IRQN DT_IRQN(QSPI_IF_BUS_NODE)
33 #define QSPI_IF_BUS_IRQ_PRIO DT_IRQ(QSPI_IF_BUS_NODE, priority)
34 #define QSPI_IF_BUS_SCK_PIN DT_PROP(QSPI_IF_BUS_NODE, sck_pin)
35 #define QSPI_IF_BUS_CSN_PIN DT_PROP(QSPI_IF_BUS_NODE, csn_pins)
36 #define QSPI_IF_BUS_IO0_PIN DT_PROP_BY_IDX(QSPI_IF_BUS_NODE, io_pins, 0)
37 #define QSPI_IF_BUS_IO1_PIN DT_PROP_BY_IDX(QSPI_IF_BUS_NODE, io_pins, 1)
38 #define QSPI_IF_BUS_IO2_PIN DT_PROP_BY_IDX(QSPI_IF_BUS_NODE, io_pins, 2)
39 #define QSPI_IF_BUS_IO3_PIN DT_PROP_BY_IDX(QSPI_IF_BUS_NODE, io_pins, 3)
40
41 #define QSPI_IF_BUS_HAS_4_IO_PINS \
42 (DT_PROP_LEN(QSPI_IF_BUS_NODE, io_pins) == 4)
43
44 #define QSPI_IF_BUS_PINCTRL_DT_DEV_CONFIG_GET \
45 PINCTRL_DT_DEV_CONFIG_GET(QSPI_IF_BUS_NODE)
46
47 /* The NRF70 device node which is on the QSPI bus */
48 #define QSPI_IF_DEVICE_NODE DT_NODELABEL(nrf70)
49
50 /* NRF70 device QSPI properties */
51 #define QSPI_IF_DEVICE_FREQUENCY DT_PROP(QSPI_IF_DEVICE_NODE, qspi_frequency)
52 #define QSPI_IF_DEVICE_CPHA DT_PROP(QSPI_IF_DEVICE_NODE, qspi_cpha)
53 #define QSPI_IF_DEVICE_CPOL DT_PROP(QSPI_IF_DEVICE_NODE, qspi_cpol)
54 #define QSPI_IF_DEVICE_QUAD_MODE DT_PROP(QSPI_IF_DEVICE_NODE, qspi_quad_mode)
55 #define QSPI_IF_DEVICE_RX_DELAY DT_PROP(QSPI_IF_DEVICE_NODE, qspi_rx_delay)
56
/* Device QSPI configuration; presumably assigned during driver bring-up
 * elsewhere in this file — not set in this chunk.
 */
static struct qspi_config *qspi_cfg;
#if NRF_QSPI_HAS_XIP_ENC || NRF_QSPI_HAS_DMA_ENC
/* State for DMA/XIP encryption nonce tracking (used by encryption paths
 * outside this view).
 */
static unsigned int nonce_last_addr;
static unsigned int nonce_cnt;
#endif /*NRF_QSPI_HAS_XIP_ENC || NRF_QSPI_HAS_DMA_ENC*/

/* Main config structure */
static nrfx_qspi_config_t QSPIconfig;
65
66 /*
67 * According to the respective specifications, the nRF52 QSPI supports clock
68 * frequencies 2 - 32 MHz and the nRF53 one supports 6 - 96 MHz.
69 */
70 BUILD_ASSERT(QSPI_IF_DEVICE_FREQUENCY >= (NRF_QSPI_BASE_CLOCK_FREQ / 16),
71 "Unsupported SCK frequency.");
72
73 /*
74 * Determine a configuration value (INST_0_SCK_CFG) and, if needed, a divider
75 * (BASE_CLOCK_DIV) for the clock from which the SCK frequency is derived that
76 * need to be used to achieve the SCK frequency as close as possible (but not
77 * higher) to the one specified in DT.
78 */
79 #if defined(CONFIG_SOC_SERIES_NRF53X)
80 /*
81 * On nRF53 Series SoCs, the default /4 divider for the HFCLK192M clock can
82 * only be used when the QSPI peripheral is idle. When a QSPI operation is
83 * performed, the divider needs to be changed to /1 or /2 (particularly,
84 * the specification says that the peripheral "supports 192 MHz and 96 MHz
85 * PCLK192M frequency"), but after that operation is complete, the default
86 * divider needs to be restored to avoid increased current consumption.
87 */
88 #if (QSPI_IF_DEVICE_FREQUENCY >= NRF_QSPI_BASE_CLOCK_FREQ)
89 /* For requested SCK >= 96 MHz, use HFCLK192M / 1 / (2*1) = 96 MHz */
90 #define BASE_CLOCK_DIV NRF_CLOCK_HFCLK_DIV_1
91 #define INST_0_SCK_CFG NRF_QSPI_FREQ_DIV1
92 /* If anomaly 159 is to be prevented, only /1 divider can be used. */
93 #elif NRF53_ERRATA_159_ENABLE_WORKAROUND
94 #define BASE_CLOCK_DIV NRF_CLOCK_HFCLK_DIV_1
95 #define INST_0_SCK_CFG (DIV_ROUND_UP(NRF_QSPI_BASE_CLOCK_FREQ, \
96 QSPI_IF_DEVICE_FREQUENCY) - 1)
97 #elif (QSPI_IF_DEVICE_FREQUENCY >= (NRF_QSPI_BASE_CLOCK_FREQ / 2))
98 /* For 96 MHz > SCK >= 48 MHz, use HFCLK192M / 2 / (2*1) = 48 MHz */
99 #define BASE_CLOCK_DIV NRF_CLOCK_HFCLK_DIV_2
100 #define INST_0_SCK_CFG NRF_QSPI_FREQ_DIV1
101 #elif (QSPI_IF_DEVICE_FREQUENCY >= (NRF_QSPI_BASE_CLOCK_FREQ / 3))
102 /* For 48 MHz > SCK >= 32 MHz, use HFCLK192M / 1 / (2*3) = 32 MHz */
103 #define BASE_CLOCK_DIV NRF_CLOCK_HFCLK_DIV_1
104 #define INST_0_SCK_CFG NRF_QSPI_FREQ_DIV3
105 #else
106 /* For requested SCK < 32 MHz, use divider /2 for HFCLK192M. */
107 #define BASE_CLOCK_DIV NRF_CLOCK_HFCLK_DIV_2
108 #define INST_0_SCK_CFG (DIV_ROUND_UP(NRF_QSPI_BASE_CLOCK_FREQ / 2, \
109 QSPI_IF_DEVICE_FREQUENCY) - 1)
110 #endif
111
112 #if BASE_CLOCK_DIV == NRF_CLOCK_HFCLK_DIV_1
113 /* For 8 MHz, use HFCLK192M / 1 / (2*12) */
114 #define INST_0_SCK_CFG_WAKE NRF_QSPI_FREQ_DIV12
115 #elif BASE_CLOCK_DIV == NRF_CLOCK_HFCLK_DIV_2
116 /* For 8 MHz, use HFCLK192M / 2 / (2*6) */
117 #define INST_0_SCK_CFG_WAKE NRF_QSPI_FREQ_DIV6
118 #else
119 #error "Unsupported base clock divider for wake-up frequency."
120 #endif
121
122 /* After the base clock divider is changed, some time is needed for the new
123 * setting to take effect. This value specifies the delay (in microseconds)
124 * to be applied to ensure that the clock is ready when the QSPI operation
125 * starts. It was measured with a logic analyzer (unfortunately, the nRF5340
126 * specification does not provide any numbers in this regard).
127 */
128 /* FIXME: This has adverse impact on performance, ~3Mbps, so, for now, it is
129 * disabled till further investigation.
130 */
131 #define BASE_CLOCK_SWITCH_DELAY_US 0
132
133 #else
134 /*
135 * On nRF52 Series SoCs, the base clock divider is not configurable,
136 * so BASE_CLOCK_DIV is not defined.
137 */
138 #if (QSPI_IF_DEVICE_FREQUENCY >= NRF_QSPI_BASE_CLOCK_FREQ)
139 #define INST_0_SCK_CFG NRF_QSPI_FREQ_DIV1
140 #else
141 #define INST_0_SCK_CFG (DIV_ROUND_UP(NRF_QSPI_BASE_CLOCK_FREQ, \
142 QSPI_IF_DEVICE_FREQUENCY) - 1)
143 #endif
144
145 /* For 8 MHz, use PCLK32M / 4 */
146 #define INST_0_SCK_CFG_WAKE NRF_QSPI_FREQ_DIV4
147
148 #endif /* defined(CONFIG_SOC_SERIES_NRF53X) */
149
150 static int qspi_device_init(const struct device *dev);
151 static void qspi_device_uninit(const struct device *dev);
152
153 #define WORD_SIZE 4
154
155 LOG_MODULE_DECLARE(wifi_nrf_bus, CONFIG_WIFI_NRF70_BUSLIB_LOG_LEVEL);
156
/**
 * @brief QSPI buffer descriptor, used for both TX and RX transfers.
 */
struct qspi_buf {
	/** Valid pointer to a data buffer; must not be NULL. */
	uint8_t *buf;
	/** Length of the data to be handled; pass 0 when there is no data
	 * to transmit/receive.
	 */
	size_t len;
};
170
/**
 * @brief QSPI custom-command descriptor.
 */
struct qspi_cmd {
	/** Command value (e.g. 0x9F - get JEDEC ID). */
	uint8_t op_code;
	/** Buffer for TX purposes; NULL if not used. */
	const struct qspi_buf *tx_buf;
	/** Buffer for RX purposes; NULL if not used. */
	const struct qspi_buf *rx_buf;
};
184
/**
 * @brief Per-instance state for the QSPI NOR access.
 *
 * An instance of this struct is handed to the nrfx QSPI driver as the
 * event-handler context (see qspi_device_init / qspi_nrfx_configure).
 */
struct qspi_nor_data {
#ifdef CONFIG_MULTITHREADING
	/* The semaphore to control exclusive access on write/erase. */
	struct k_sem trans;
	/* The semaphore to control exclusive access to the device. */
	struct k_sem sem;
	/* The semaphore to indicate that transfer has completed. */
	struct k_sem sync;
	/* The semaphore to control driver init/uninit (acts as a usage
	 * refcount, see qspi_device_init/qspi_device_uninit).
	 */
	struct k_sem count;
#else /* CONFIG_MULTITHREADING */
	/* A flag that signals completed transfer when threads are
	 * not enabled.
	 */
	volatile bool ready;
#endif /* CONFIG_MULTITHREADING */
};
205
/* Map CPOL/CPHA to an SPI mode number. Only mode 0 (both low) and
 * mode 3 (both high) are supported; a mixed combination trips the
 * assertion and yields -EINVAL.
 */
static inline int qspi_get_mode(bool cpol, bool cpha)
{
	int mode = -EINVAL;

	if (cpol == cpha) {
		mode = cpol ? 1 : 0;
	}

	__ASSERT(mode != -EINVAL, "Invalid QSPI mode");

	return mode;
}
220
/* True when the write operation code uses all four data lines. */
static inline bool qspi_write_is_quad(nrf_qspi_writeoc_t lines)
{
	return (lines == NRF_QSPI_WRITEOC_PP4IO) ||
	       (lines == NRF_QSPI_WRITEOC_PP4O);
}
231
/* True when the read operation code uses all four data lines. */
static inline bool qspi_read_is_quad(nrf_qspi_readoc_t lines)
{
	return (lines == NRF_QSPI_READOC_READ4IO) ||
	       (lines == NRF_QSPI_READOC_READ4O);
}
242
qspi_get_lines_write(uint8_t lines)243 static inline int qspi_get_lines_write(uint8_t lines)
244 {
245 register int ret = -EINVAL;
246
247 switch (lines) {
248 case 3:
249 ret = NRF_QSPI_WRITEOC_PP4IO;
250 break;
251 case 2:
252 ret = NRF_QSPI_WRITEOC_PP4O;
253 break;
254 case 1:
255 ret = NRF_QSPI_WRITEOC_PP2O;
256 break;
257 case 0:
258 ret = NRF_QSPI_WRITEOC_PP;
259 break;
260 default:
261 break;
262 }
263
264 __ASSERT(ret != -EINVAL, "Invalid QSPI write line");
265
266 return ret;
267 }
268
qspi_get_lines_read(uint8_t lines)269 static inline int qspi_get_lines_read(uint8_t lines)
270 {
271 register int ret = -EINVAL;
272
273 switch (lines) {
274 case 4:
275 ret = NRF_QSPI_READOC_READ4IO;
276 break;
277 case 3:
278 ret = NRF_QSPI_READOC_READ4O;
279 break;
280 case 2:
281 ret = NRF_QSPI_READOC_READ2IO;
282 break;
283 case 1:
284 ret = NRF_QSPI_READOC_READ2O;
285 break;
286 case 0:
287 ret = NRF_QSPI_READOC_FASTREAD;
288 break;
289 default:
290 break;
291 }
292
293 __ASSERT(ret != -EINVAL, "Invalid QSPI read line");
294
295 return ret;
296 }
297
/* Thin wrapper around nrfx_qspi_read(); the driver's single internal
 * entry point for QSPI reads.
 */
nrfx_err_t _nrfx_qspi_read(void *p_rx_buffer, size_t rx_buffer_length, uint32_t src_address)
{
	return nrfx_qspi_read(p_rx_buffer, rx_buffer_length, src_address);
}
302
/* Thin wrapper around nrfx_qspi_write(); the driver's single internal
 * entry point for QSPI writes.
 */
nrfx_err_t _nrfx_qspi_write(void const *p_tx_buffer, size_t tx_buffer_length, uint32_t dst_address)
{
	return nrfx_qspi_write(p_tx_buffer, tx_buffer_length, dst_address);
}
307
/* Initialize the nrfx QSPI driver and apply the extra IFTIMING dummy-cycle
 * setting. The nrfx_qspi_init() result is deliberately not propagated
 * (see the note at the bottom); this always returns NRFX_SUCCESS.
 */
nrfx_err_t _nrfx_qspi_init(nrfx_qspi_config_t const *p_config, nrfx_qspi_handler_t handler,
			   void *p_context)
{
	NRF_QSPI_Type *p_reg = NRF_QSPI;

	nrfx_qspi_init(p_config, handler, p_context);

	/* RDC4IO = 4'hA (register IFTIMING), which means 10 Dummy Cycles for READ4. */
	p_reg->IFTIMING |= qspi_cfg->RDC4IO;

	/* LOG_DBG("%04x : IFTIMING", p_reg->IFTIMING & qspi_cfg->RDC4IO); */

	/* ACTIVATE task fails for slave bitfile so ignore it */
	return NRFX_SUCCESS;
}
323
324
/**
 * @brief Static driver-instance data (semaphores / completion state).
 *
 * NOTE(review): historically labelled "Main configuration structure",
 * but this is qspi_nor_data state — presumably wired to dev->data by the
 * device definition elsewhere in the driver; confirm against the device
 * instantiation.
 */
static struct qspi_nor_data qspi_nor_memory_data = {
#ifdef CONFIG_MULTITHREADING
	/* trans/sem: binary semaphores, initially available. */
	.trans = Z_SEM_INITIALIZER(qspi_nor_memory_data.trans, 1, 1),
	.sem = Z_SEM_INITIALIZER(qspi_nor_memory_data.sem, 1, 1),
	/* sync: completion semaphore, initially taken. */
	.sync = Z_SEM_INITIALIZER(qspi_nor_memory_data.sync, 0, 1),
	/* count: init/uninit refcount, starts at zero. */
	.count = Z_SEM_INITIALIZER(qspi_nor_memory_data.count, 0, K_SEM_MAX_LIMIT),
#endif /* CONFIG_MULTITHREADING */
};
336
337 NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(QSPI_IF_BUS_NODE);
338
339 IF_ENABLED(CONFIG_PINCTRL, (PINCTRL_DT_DEFINE(QSPI_IF_BUS_NODE)));
340
341 /**
342 * @brief Converts NRFX return codes to the zephyr ones
343 */
/**
 * @brief Converts NRFX return codes to the zephyr ones
 */
static inline int qspi_get_zephyr_ret_code(nrfx_err_t res)
{
	if (res == NRFX_SUCCESS) {
		return 0;
	}

	if ((res == NRFX_ERROR_INVALID_PARAM) ||
	    (res == NRFX_ERROR_INVALID_ADDR)) {
		return -EINVAL;
	}

	if (res == NRFX_ERROR_INVALID_STATE) {
		return -ECANCELED;
	}

#if NRF53_ERRATA_159_ENABLE_WORKAROUND
	if (res == NRFX_ERROR_FORBIDDEN) {
		LOG_ERR("nRF5340 anomaly 159 conditions detected");
		LOG_ERR("Set the CPU clock to 64 MHz before starting QSPI operation");
		return -ECANCELED;
	}
#endif

	/* NRFX_ERROR_BUSY, NRFX_ERROR_TIMEOUT and everything else. */
	return -EBUSY;
}
366
/* Fetch this driver's qspi_nor_data from the Zephyr device object. */
static inline struct qspi_nor_data *get_dev_data(const struct device *dev)
{
	return dev->data;
}
371
/* Acquire exclusive access to the device; on nRF53 this also switches the
 * HFCLK192M divider to the operating value for the duration of the lock.
 */
static inline void qspi_lock(const struct device *dev)
{
#ifdef CONFIG_MULTITHREADING
	struct qspi_nor_data *dev_data = get_dev_data(dev);

	k_sem_take(&dev_data->sem, K_FOREVER);
#else /* CONFIG_MULTITHREADING */
	ARG_UNUSED(dev);
#endif /* CONFIG_MULTITHREADING */

	/*
	 * Change the base clock divider only for the time the driver is locked
	 * to perform a QSPI operation, otherwise the power consumption would be
	 * increased also when the QSPI peripheral is idle.
	 */
#if defined(CONFIG_SOC_SERIES_NRF53X)
	nrf_clock_hfclk192m_div_set(NRF_CLOCK, BASE_CLOCK_DIV);
	k_busy_wait(BASE_CLOCK_SWITCH_DELAY_US);
#endif
}
392
/* Release exclusive access to the device, restoring the power-friendly /4
 * base clock divider first on nRF53.
 */
static inline void qspi_unlock(const struct device *dev)
{
#if defined(CONFIG_SOC_SERIES_NRF53X)
	/* Restore the default base clock divider to reduce power consumption.
	 */
	nrf_clock_hfclk192m_div_set(NRF_CLOCK, NRF_CLOCK_HFCLK_DIV_4);
	k_busy_wait(BASE_CLOCK_SWITCH_DELAY_US);
#endif

#ifdef CONFIG_MULTITHREADING
	struct qspi_nor_data *dev_data = get_dev_data(dev);

	k_sem_give(&dev_data->sem);
#else /* CONFIG_MULTITHREADING */
	ARG_UNUSED(dev);
#endif /* CONFIG_MULTITHREADING */
}
410
/* Serialize write/erase transactions (no-op without multithreading). */
static inline void qspi_trans_lock(const struct device *dev)
{
#ifdef CONFIG_MULTITHREADING
	struct qspi_nor_data *dev_data = get_dev_data(dev);

	k_sem_take(&dev_data->trans, K_FOREVER);
#else /* CONFIG_MULTITHREADING */
	ARG_UNUSED(dev);
#endif /* CONFIG_MULTITHREADING */
}
421
/* Release the write/erase transaction lock (no-op without multithreading). */
static inline void qspi_trans_unlock(const struct device *dev)
{
#ifdef CONFIG_MULTITHREADING
	struct qspi_nor_data *dev_data = get_dev_data(dev);

	k_sem_give(&dev_data->trans);
#else /* CONFIG_MULTITHREADING */
	ARG_UNUSED(dev);
#endif /* CONFIG_MULTITHREADING */
}
432
/* Block until the transfer signalled by the QSPI event handler completes.
 * Only waits when the transfer was actually queued (res == NRFX_SUCCESS).
 */
static inline void qspi_wait_for_completion(const struct device *dev, nrfx_err_t res)
{
	struct qspi_nor_data *dev_data = get_dev_data(dev);

	if (res == NRFX_SUCCESS) {
#ifdef CONFIG_MULTITHREADING
		k_sem_take(&dev_data->sync, K_FOREVER);
#else /* CONFIG_MULTITHREADING */
		/* No scheduler: idle the CPU with interrupts masked until the
		 * event handler (qspi_handler -> qspi_complete) sets `ready`.
		 */
		unsigned int key = irq_lock();

		while (!dev_data->ready) {
			k_cpu_atomic_idle(key);
			key = irq_lock();
		}

		/* Consume the completion flag for the next transfer. */
		dev_data->ready = false;
		irq_unlock(key);
#endif /* CONFIG_MULTITHREADING */
	}
}
453
/* Signal transfer completion to qspi_wait_for_completion(). */
static inline void qspi_complete(struct qspi_nor_data *dev_data)
{
#ifdef CONFIG_MULTITHREADING
	k_sem_give(&dev_data->sync);
#else /* CONFIG_MULTITHREADING */
	dev_data->ready = true;
#endif /* CONFIG_MULTITHREADING */
}
462
_qspi_complete(struct qspi_nor_data * dev_data)463 static inline void _qspi_complete(struct qspi_nor_data *dev_data)
464 {
465 if (!qspi_cfg->easydma) {
466 return;
467 }
468
469 qspi_complete(dev_data);
470 }
/* Completion wait, only relevant when EasyDMA transfers are in use. */
static inline void _qspi_wait_for_completion(const struct device *dev, nrfx_err_t res)
{
	if (qspi_cfg->easydma) {
		qspi_wait_for_completion(dev, res);
	}
}
479
480 /**
481 * @brief QSPI handler
482 *
483 * @param event Driver event type
484 * @param p_context Pointer to context. Use in interrupt handler.
485 * @retval None
486 */
/**
 * @brief QSPI event handler registered with the nrfx driver.
 *
 * @param event Driver event type
 * @param p_context Pointer to the qspi_nor_data instance
 * @retval None
 */
static void qspi_handler(nrfx_qspi_evt_t event, void *p_context)
{
	struct qspi_nor_data *dev_data = p_context;

	switch (event) {
	case NRFX_QSPI_EVENT_DONE:
		_qspi_complete(dev_data);
		break;
	default:
		break;
	}
}
495
496 static bool qspi_initialized;
497
qspi_device_init(const struct device * dev)498 static int qspi_device_init(const struct device *dev)
499 {
500 struct qspi_nor_data *dev_data = get_dev_data(dev);
501 nrfx_err_t res;
502 int ret = 0;
503
504 if (!IS_ENABLED(CONFIG_NRF70_QSPI_LOW_POWER)) {
505 return 0;
506 }
507
508 qspi_lock(dev);
509
510 /* In multithreading, driver can call qspi_device_init more than once
511 * before calling qspi_device_uninit. Keepping count, so QSPI is
512 * uninitialized only at the last call (count == 0).
513 */
514 #ifdef CONFIG_MULTITHREADING
515 k_sem_give(&dev_data->count);
516 #endif
517
518 if (!qspi_initialized) {
519 res = nrfx_qspi_init(&QSPIconfig, qspi_handler, dev_data);
520 ret = qspi_get_zephyr_ret_code(res);
521 NRF_QSPI->IFTIMING |= qspi_cfg->RDC4IO;
522 qspi_initialized = (ret == 0);
523 }
524
525 qspi_unlock(dev);
526
527 return ret;
528 }
529
/* Uninitialize the QSPI peripheral once the last user is done with it
 * (low-power mode only).
 */
static void qspi_device_uninit(const struct device *dev)
{
	bool last = true;

	/* Without the low-power option the peripheral stays initialized. */
	if (!IS_ENABLED(CONFIG_NRF70_QSPI_LOW_POWER)) {
		return;
	}

	qspi_lock(dev);

#ifdef CONFIG_MULTITHREADING
	struct qspi_nor_data *dev_data = get_dev_data(dev);

	/* The last thread to finish using the driver uninit the QSPI */
	(void)k_sem_take(&dev_data->count, K_NO_WAIT);
	last = (k_sem_count_get(&dev_data->count) == 0);
#endif

	if (last) {
		/* Wait for the flash device to become idle before shutting
		 * the peripheral down.
		 */
		while (nrfx_qspi_mem_busy_check() != NRFX_SUCCESS) {
			if (IS_ENABLED(CONFIG_MULTITHREADING)) {
				k_msleep(50);
			} else {
				k_busy_wait(50000);
			}
		}

		nrfx_qspi_uninit();

#ifndef CONFIG_PINCTRL
		/* Park CSn deasserted (high) while the bus is down. */
		nrf_gpio_cfg_output(QSPI_PROP_AT(csn_pins, 0));
		nrf_gpio_pin_set(QSPI_PROP_AT(csn_pins, 0));
#endif

		qspi_initialized = false;
	}

	qspi_unlock(dev);
}
569
570 /* QSPI send custom command.
571 *
572 * If this is used for both send and receive the buffer sizes must be
573 * equal and cover the whole transaction.
574 */
qspi_send_cmd(const struct device * dev,const struct qspi_cmd * cmd,bool wren)575 static int qspi_send_cmd(const struct device *dev, const struct qspi_cmd *cmd, bool wren)
576 {
577 /* Check input parameters */
578 if (!cmd) {
579 return -EINVAL;
580 }
581
582 const void *tx_buf = NULL;
583 size_t tx_len = 0;
584 void *rx_buf = NULL;
585 size_t rx_len = 0;
586 size_t xfer_len = sizeof(cmd->op_code);
587
588 if (cmd->tx_buf) {
589 tx_buf = cmd->tx_buf->buf;
590 tx_len = cmd->tx_buf->len;
591 }
592
593 if (cmd->rx_buf) {
594 rx_buf = cmd->rx_buf->buf;
595 rx_len = cmd->rx_buf->len;
596 }
597
598 if ((rx_len != 0) && (tx_len != 0)) {
599 if (rx_len != tx_len) {
600 return -EINVAL;
601 }
602
603 xfer_len += tx_len;
604 } else {
605 /* At least one of these is zero. */
606 xfer_len += tx_len + rx_len;
607 }
608
609 if (xfer_len > NRF_QSPI_CINSTR_LEN_9B) {
610 LOG_WRN("cinstr %02x transfer too long: %zu", cmd->op_code, xfer_len);
611
612 return -EINVAL;
613 }
614
615 nrf_qspi_cinstr_conf_t cinstr_cfg = {
616 .opcode = cmd->op_code,
617 .length = xfer_len,
618 .io2_level = true,
619 .io3_level = true,
620 .wipwait = false,
621 .wren = wren,
622 };
623
624 qspi_lock(dev);
625
626 int res = nrfx_qspi_cinstr_xfer(&cinstr_cfg, tx_buf, rx_buf);
627
628 qspi_unlock(dev);
629 return qspi_get_zephyr_ret_code(res);
630 }
631
632 /* RDSR wrapper. Negative value is error. */
qspi_rdsr(const struct device * dev)633 static int qspi_rdsr(const struct device *dev)
634 {
635 uint8_t sr = -1;
636 const struct qspi_buf sr_buf = {
637 .buf = &sr,
638 .len = sizeof(sr),
639 };
640 struct qspi_cmd cmd = {
641 .op_code = SPI_NOR_CMD_RDSR,
642 .rx_buf = &sr_buf,
643 };
644 int ret = qspi_send_cmd(dev, &cmd, false);
645
646 return (ret < 0) ? ret : sr;
647 }
648
649 /* Wait until RDSR confirms write is not in progress. */
qspi_wait_while_writing(const struct device * dev)650 static int qspi_wait_while_writing(const struct device *dev)
651 {
652 int ret;
653
654 do {
655 ret = qspi_rdsr(dev);
656 } while ((ret >= 0) && ((ret & SPI_NOR_WIP_BIT) != 0U));
657
658 return (ret < 0) ? ret : 0;
659 }
660
661 /**
662 * @brief Fills init struct
663 *
664 * @param config Pointer to the config struct provided by user
665 * @param initstruct Pointer to the configuration struct
666 * @retval None
667 */
/**
 * @brief Fills the nrfx QSPI init struct from devicetree properties.
 *
 * Also bumps qspi_cfg->qspi_slave_latency when the bus frequency is
 * 16 MHz or higher.
 *
 * @param initstruct Pointer to the configuration struct to fill
 * @retval None
 */
static inline void qspi_fill_init_struct(nrfx_qspi_config_t *initstruct)
{
	/* Configure XIP offset */
	initstruct->xip_offset = 0;

#ifdef CONFIG_PINCTRL
	/* Pins are handled through pinctrl, so skip nrfx GPIO/PSEL setup.
	 * (These were previously comma-joined into one statement with the
	 * code below; use proper statements.)
	 */
	initstruct->skip_gpio_cfg = true;
	initstruct->skip_psel_cfg = true;
#else
	/* Configure pins */
	initstruct->pins.sck_pin = QSPI_IF_BUS_SCK_PIN;
	initstruct->pins.csn_pin = QSPI_IF_BUS_CSN_PIN;
	initstruct->pins.io0_pin = QSPI_IF_BUS_IO0_PIN;
	initstruct->pins.io1_pin = QSPI_IF_BUS_IO1_PIN;
#if QSPI_IF_BUS_HAS_4_IO_PINS
	initstruct->pins.io2_pin = QSPI_IF_BUS_IO2_PIN;
	initstruct->pins.io3_pin = QSPI_IF_BUS_IO3_PIN;
#else
	initstruct->pins.io2_pin = NRF_QSPI_PIN_NOT_CONNECTED;
	initstruct->pins.io3_pin = NRF_QSPI_PIN_NOT_CONNECTED;
#endif
#endif /* CONFIG_PINCTRL */
	/* Configure Protocol interface */
	initstruct->prot_if.addrmode = NRF_QSPI_ADDRMODE_24BIT;

	initstruct->prot_if.dpmconfig = false;

	/* Configure physical interface */
	initstruct->phy_if.sck_freq = INST_0_SCK_CFG;

	/* Using MHZ fails checkpatch constant check */
	if (QSPI_IF_DEVICE_FREQUENCY >= 16000000) {
		qspi_cfg->qspi_slave_latency = 1;
	}
	initstruct->phy_if.sck_delay = QSPI_IF_DEVICE_RX_DELAY;
	initstruct->phy_if.spi_mode = qspi_get_mode(QSPI_IF_DEVICE_CPOL, QSPI_IF_DEVICE_CPHA);

	if (QSPI_IF_DEVICE_QUAD_MODE) {
		initstruct->prot_if.readoc = NRF_QSPI_READOC_READ4IO;
		initstruct->prot_if.writeoc = NRF_QSPI_WRITEOC_PP4IO;
	} else {
		initstruct->prot_if.readoc = NRF_QSPI_READOC_FASTREAD;
		initstruct->prot_if.writeoc = NRF_QSPI_WRITEOC_PP;
	}

	initstruct->phy_if.dpmen = false;
}
715
/* Configures QSPI memory for the transfer: fills the nrfx config from DT,
 * initializes the peripheral, and makes the flash QE (quad-enable) status
 * bit match the configured transfer mode.
 *
 * @return 0 on success, negative errno otherwise
 */
static int qspi_nrfx_configure(const struct device *dev)
{
	if (!dev) {
		return -ENXIO;
	}

	struct qspi_nor_data *dev_data = dev->data;

	qspi_fill_init_struct(&QSPIconfig);

#if defined(CONFIG_SOC_SERIES_NRF53X)
	/* When the QSPI peripheral is activated, during the nrfx_qspi driver
	 * initialization, it reads the status of the connected flash chip.
	 * Make sure this transaction is performed with a valid base clock
	 * divider.
	 */
	nrf_clock_hfclk192m_div_set(NRF_CLOCK, BASE_CLOCK_DIV);
	k_busy_wait(BASE_CLOCK_SWITCH_DELAY_US);
#endif

	nrfx_err_t res = _nrfx_qspi_init(&QSPIconfig, qspi_handler, dev_data);

#if defined(CONFIG_SOC_SERIES_NRF53X)
	/* Restore the default /4 divider after the QSPI initialization. */
	nrf_clock_hfclk192m_div_set(NRF_CLOCK, NRF_CLOCK_HFCLK_DIV_4);
	k_busy_wait(BASE_CLOCK_SWITCH_DELAY_US);
#endif

	int ret = qspi_get_zephyr_ret_code(res);

	if (ret == 0) {
		/* Set QE to match transfer mode. If not using quad
		 * it's OK to leave QE set, but doing so prevents use
		 * of WP#/RESET#/HOLD# which might be useful.
		 *
		 * Note build assert above ensures QER is S1B6. Other
		 * options require more logic.
		 */
		ret = qspi_rdsr(dev);

		if (ret < 0) {
			LOG_ERR("RDSR failed: %d", ret);
			return ret;
		}

		uint8_t sr = (uint8_t)ret;
		/* Quad transfers in either direction require QE set. */
		bool qe_value = (qspi_write_is_quad(QSPIconfig.prot_if.writeoc)) ||
				(qspi_read_is_quad(QSPIconfig.prot_if.readoc));
		const uint8_t qe_mask = BIT(6); /* only S1B6 */
		bool qe_state = ((sr & qe_mask) != 0U);

		LOG_DBG("RDSR %02x QE %d need %d: %s", sr, qe_state, qe_value,
			(qe_state != qe_value) ? "updating" : "no-change");

		ret = 0;

		if (qe_state != qe_value) {
			const struct qspi_buf sr_buf = {
				.buf = &sr,
				.len = sizeof(sr),
			};
			struct qspi_cmd cmd = {
				.op_code = SPI_NOR_CMD_WRSR,
				.tx_buf = &sr_buf,
			};

			/* Toggling the bit yields the desired value because
			 * qe_state != qe_value here; tx_buf points at sr, so
			 * the modified byte is what gets written.
			 */
			sr ^= qe_mask;
			ret = qspi_send_cmd(dev, &cmd, true);

			/* Writing SR can take some time, and further
			 * commands sent while it's happening can be
			 * corrupted. Wait.
			 */
			if (ret == 0) {
				ret = qspi_wait_while_writing(dev);
			}
		}

		if (ret < 0) {
			LOG_ERR("QE %s failed: %d", qe_value ? "set" : "clear", ret);
		}
	}

	return ret;
}
802
/* Read `size` bytes at flash offset `addr` into `dest`; neither the flash
 * address, the destination pointer nor the length has to be word aligned.
 * The transfer is split into an aligned middle chunk plus up to
 * WORD_SIZE-1 prefix/suffix bytes bounced through a small aligned buffer
 * (the underlying QSPI reads presumably require word-aligned address and
 * length — see the nrfx_qspi documentation).
 */
static inline nrfx_err_t read_non_aligned(const struct device *dev, int addr, void *dest,
					  size_t size)
{
	uint8_t __aligned(WORD_SIZE) buf[WORD_SIZE * 2];
	uint8_t *dptr = dest;

	/* Bytes before the first word-aligned flash address (0..3). */
	int flash_prefix = (WORD_SIZE - (addr % WORD_SIZE)) % WORD_SIZE;

	if (flash_prefix > size) {
		flash_prefix = size;
	}

	/* Bytes before the first word-aligned destination address (0..3). */
	int dest_prefix = (WORD_SIZE - (int)dptr % WORD_SIZE) % WORD_SIZE;

	if (dest_prefix > size) {
		dest_prefix = size;
	}

	int flash_suffix = (size - flash_prefix) % WORD_SIZE;
	int flash_middle = size - flash_prefix - flash_suffix;
	int dest_middle = size - dest_prefix - (size - dest_prefix) % WORD_SIZE;

	/* Shrink the middle chunk if the destination allows less than the
	 * flash layout; the remainder moves into the suffix.
	 */
	if (flash_middle > dest_middle) {
		flash_middle = dest_middle;
		flash_suffix = size - flash_prefix - flash_middle;
	}

	nrfx_err_t res = NRFX_SUCCESS;

	/* read from aligned flash to aligned memory */
	if (flash_middle != 0) {
		res = _nrfx_qspi_read(dptr + dest_prefix, flash_middle, addr + flash_prefix);

		_qspi_wait_for_completion(dev, res);

		if (res != NRFX_SUCCESS) {
			return res;
		}

		/* perform shift in RAM */
		if (flash_prefix != dest_prefix) {
			memmove(dptr + flash_prefix, dptr + dest_prefix, flash_middle);
		}
	}

	/* read prefix */
	if (flash_prefix != 0) {
		res = _nrfx_qspi_read(buf, WORD_SIZE, addr - (WORD_SIZE - flash_prefix));

		_qspi_wait_for_completion(dev, res);

		if (res != NRFX_SUCCESS) {
			return res;
		}

		memcpy(dptr, buf + WORD_SIZE - flash_prefix, flash_prefix);
	}

	/* read suffix */
	if (flash_suffix != 0) {
		res = _nrfx_qspi_read(buf, WORD_SIZE * 2, addr + flash_prefix + flash_middle);

		_qspi_wait_for_completion(dev, res);

		if (res != NRFX_SUCCESS) {
			return res;
		}

		memcpy(dptr + flash_prefix + flash_middle, buf, flash_suffix);
	}

	return res;
}
876
qspi_nor_read(const struct device * dev,int addr,void * dest,size_t size)877 static int qspi_nor_read(const struct device *dev, int addr, void *dest, size_t size)
878 {
879 if (!dest) {
880 return -EINVAL;
881 }
882
883 /* read size must be non-zero */
884 if (!size) {
885 return 0;
886 }
887
888 int rc = qspi_device_init(dev);
889
890 if (rc != 0) {
891 goto out;
892 }
893
894 qspi_lock(dev);
895
896 nrfx_err_t res = read_non_aligned(dev, addr, dest, size);
897
898 qspi_unlock(dev);
899
900 rc = qspi_get_zephyr_ret_code(res);
901
902 out:
903 qspi_device_uninit(dev);
904 return rc;
905 }
906
907 /* addr aligned, sptr not null, slen less than 4 */
write_sub_word(const struct device * dev,int addr,const void * sptr,size_t slen)908 static inline nrfx_err_t write_sub_word(const struct device *dev, int addr, const void *sptr,
909 size_t slen)
910 {
911 uint8_t __aligned(4) buf[4];
912 nrfx_err_t res;
913
914 /* read out the whole word so that unchanged data can be
915 * written back
916 */
917 res = _nrfx_qspi_read(buf, sizeof(buf), addr);
918 _qspi_wait_for_completion(dev, res);
919
920 if (res == NRFX_SUCCESS) {
921 memcpy(buf, sptr, slen);
922 res = _nrfx_qspi_write(buf, sizeof(buf), addr);
923 _qspi_wait_for_completion(dev, res);
924 }
925
926 return res;
927 }
928
/* Write `size` bytes from `src` to flash offset `addr`.
 * The address must be word aligned and the size either below 4 bytes or a
 * multiple of 4. Returns 0 on success, negative errno otherwise.
 */
static int qspi_nor_write(const struct device *dev, int addr, const void *src, size_t size)
{
	if (!src) {
		return -EINVAL;
	}

	/* write size must be non-zero, less than 4, or a multiple of 4 */
	if ((size == 0) || ((size > 4) && ((size % 4U) != 0))) {
		return -EINVAL;
	}

	/* address must be 4-byte aligned */
	if ((addr % 4U) != 0) {
		return -EINVAL;
	}

	nrfx_err_t res = NRFX_SUCCESS;

	int rc = qspi_device_init(dev);

	if (rc != 0) {
		goto out;
	}

	/* Serialize against other write/erase transactions. */
	qspi_trans_lock(dev);

	qspi_lock(dev);

	if (size < 4U) {
		/* Sub-word write: read-modify-write of one aligned word. */
		res = write_sub_word(dev, addr, src, size);
	} else {
		res = _nrfx_qspi_write(src, size, addr);
		_qspi_wait_for_completion(dev, res);
	}

	qspi_unlock(dev);

	qspi_trans_unlock(dev);

	rc = qspi_get_zephyr_ret_code(res);
out:
	qspi_device_uninit(dev);
	return rc;
}
973
974 /**
975 * @brief Configure the flash
976 *
977 * @param dev The flash device structure
978 * @param info The flash info structure
979 * @return 0 on success, negative errno code otherwise
980 */
/**
 * @brief Configure the flash and release the peripheral afterwards.
 *
 * @param dev The flash device structure
 * @return 0 on success, negative errno code otherwise
 */
static int qspi_nor_configure(const struct device *dev)
{
	int ret = qspi_nrfx_configure(dev);

	if (ret == 0) {
		qspi_device_uninit(dev);
	}

	return ret;
}
993
994 /**
995 * @brief Initialize and configure the flash
996 *
997 * @param name The flash name
998 * @return 0 on success, negative errno code otherwise
999 */
static int qspi_nor_init(const struct device *dev)
{
#ifdef CONFIG_PINCTRL
	/* Route the QSPI pins via pinctrl before touching the bus. */
	int ret = pinctrl_apply_state(QSPI_IF_BUS_PINCTRL_DT_DEV_CONFIG_GET,
				      PINCTRL_STATE_DEFAULT);

	if (ret < 0) {
		return ret;
	}
#endif

	/* Hook up the nrfx QSPI ISR with the DT-provided IRQ priority. */
	IRQ_CONNECT(QSPI_IF_BUS_IRQN,
		    QSPI_IF_BUS_IRQ_PRIO,
		    nrfx_isr,
		    nrfx_qspi_irq_handler,
		    0);

	return qspi_nor_configure(dev);
}
1019
1020 #if defined(CONFIG_SOC_SERIES_NRF53X)
/* Send the encryption configuration to the device with opcode 0x4f.
 * NOTE(review): only the second nonce word (nonce[1]) is transmitted —
 * confirm against the nRF70 command set that this is intentional.
 */
static int qspi_cmd_encryption(const struct device *dev, nrf_qspi_encryption_t *p_cfg)
{
	const struct qspi_buf tx_buf = { .buf = (uint8_t *)&p_cfg->nonce[1],
					 .len = sizeof(p_cfg->nonce[1]) };
	const struct qspi_cmd cmd = {
		.op_code = 0x4f,
		.tx_buf = &tx_buf,
	};

	int ret = qspi_device_init(dev);

	if (ret == 0) {
		ret = qspi_send_cmd(dev, &cmd, false);
	}

	qspi_device_uninit(dev);

	if (ret < 0) {
		LOG_DBG("cmd_encryption failed %d", ret);
	}

	return ret;
}
1044 #endif
1045
qspi_RDSR2(const struct device * dev,uint8_t * rdsr2)1046 int qspi_RDSR2(const struct device *dev, uint8_t *rdsr2)
1047 {
1048 int ret = 0;
1049 uint8_t sr = 0;
1050
1051 const struct qspi_buf sr_buf = {
1052 .buf = &sr,
1053 .len = sizeof(sr),
1054 };
1055 struct qspi_cmd cmd = {
1056 .op_code = 0x2f,
1057 .rx_buf = &sr_buf,
1058 };
1059
1060 ret = qspi_device_init(dev);
1061
1062 ret = qspi_send_cmd(dev, &cmd, false);
1063
1064 qspi_device_uninit(dev);
1065
1066 LOG_DBG("RDSR2 = 0x%x", sr);
1067
1068 if (ret == 0) {
1069 *rdsr2 = sr;
1070 }
1071
1072 return ret;
1073 }
1074
1075 /* Wait until RDSR2 confirms RPU_WAKE write is successful */
int qspi_validate_rpu_wake_writecmd(const struct device *dev)
{
	int ret = 0;
	uint8_t rdsr2 = 0;

	/* NOTE(review): the loop bound is 1, so only a single RDSR2 poll is
	 * performed despite the "wait until" wording above — confirm intent.
	 */
	for (int ii = 0; ii < 1; ii++) {
		ret = qspi_RDSR2(dev, &rdsr2);
		if (!ret && (rdsr2 & RPU_WAKEUP_NOW)) {
			return 0;
		}
	}

	/* Wake-up bit was not observed. */
	return -1;
}
1090
1091
qspi_RDSR1(const struct device * dev,uint8_t * rdsr1)1092 int qspi_RDSR1(const struct device *dev, uint8_t *rdsr1)
1093 {
1094 int ret = 0;
1095 uint8_t sr = 0;
1096
1097 const struct qspi_buf sr_buf = {
1098 .buf = &sr,
1099 .len = sizeof(sr),
1100 };
1101 struct qspi_cmd cmd = {
1102 .op_code = 0x1f,
1103 .rx_buf = &sr_buf,
1104 };
1105
1106 ret = qspi_device_init(dev);
1107
1108 ret = qspi_send_cmd(dev, &cmd, false);
1109
1110 qspi_device_uninit(dev);
1111
1112 LOG_DBG("RDSR1 = 0x%x", sr);
1113
1114 if (ret == 0) {
1115 *rdsr1 = sr;
1116 }
1117
1118 return ret;
1119 }
1120
1121 /* Wait until RDSR1 confirms RPU_AWAKE/RPU_READY */
qspi_wait_while_rpu_awake(const struct device * dev)1122 int qspi_wait_while_rpu_awake(const struct device *dev)
1123 {
1124 int ret;
1125 uint8_t val = 0;
1126
1127 for (int ii = 0; ii < 10; ii++) {
1128 ret = qspi_RDSR1(dev, &val);
1129
1130 LOG_DBG("RDSR1 = 0x%x", val);
1131
1132 if (!ret && (val & RPU_AWAKE_BIT)) {
1133 break;
1134 }
1135
1136 k_msleep(1);
1137 }
1138
1139 if (ret || !(val & RPU_AWAKE_BIT)) {
1140 LOG_ERR("RPU is not awake even after 10ms");
1141 return -1;
1142 }
1143
1144 /* Restore QSPI clock frequency from DTS */
1145 QSPIconfig.phy_if.sck_freq = INST_0_SCK_CFG;
1146
1147 return val;
1148 }
1149
qspi_WRSR2(const struct device * dev,uint8_t data)1150 int qspi_WRSR2(const struct device *dev, uint8_t data)
1151 {
1152 const struct qspi_buf tx_buf = {
1153 .buf = &data,
1154 .len = sizeof(data),
1155 };
1156 const struct qspi_cmd cmd = {
1157 .op_code = 0x3f,
1158 .tx_buf = &tx_buf,
1159 };
1160 int ret = qspi_device_init(dev);
1161
1162 if (ret == 0) {
1163 ret = qspi_send_cmd(dev, &cmd, false);
1164 }
1165
1166 qspi_device_uninit(dev);
1167
1168 if (ret < 0) {
1169 LOG_ERR("cmd_wakeup RPU failed %d", ret);
1170 }
1171
1172 return ret;
1173 }
1174
qspi_cmd_wakeup_rpu(const struct device * dev,uint8_t data)1175 int qspi_cmd_wakeup_rpu(const struct device *dev, uint8_t data)
1176 {
1177 int ret;
1178
1179 /* Waking RPU works reliably only with lowest frequency (8MHz) */
1180 QSPIconfig.phy_if.sck_freq = INST_0_SCK_CFG_WAKE;
1181
1182 ret = qspi_WRSR2(dev, data);
1183
1184 return ret;
1185 }
1186
/* Pseudo device handed to the qspi_nor_* helpers; only .data is populated. */
struct device qspi_perip = {
	.data = &qspi_nor_memory_data,
};
1190
/* De-initialize the QSPI interface. Currently a stub: it only logs and
 * always returns 0 — teardown is not yet implemented (see TODO).
 */
int qspi_deinit(void)
{
	LOG_DBG("TODO : %s", __func__);

	return 0;
}
1197
qspi_init(struct qspi_config * config)1198 int qspi_init(struct qspi_config *config)
1199 {
1200 unsigned int rc;
1201
1202 qspi_cfg = config;
1203
1204 config->readoc = config->quad_spi ? NRF_QSPI_READOC_READ4IO : NRF_QSPI_READOC_FASTREAD;
1205 config->writeoc = config->quad_spi ? NRF_QSPI_WRITEOC_PP4IO : NRF_QSPI_WRITEOC_PP;
1206
1207 rc = qspi_nor_init(&qspi_perip);
1208
1209 k_sem_init(&qspi_cfg->lock, 1, 1);
1210
1211 return rc;
1212 }
1213
/*
 * Advance the DMA-encryption nonce when the transfer at @p addr is not
 * contiguous with the previous one (or for high-latency reads). No-op
 * when encryption is disabled or the SoC lacks QSPI encryption support.
 */
void qspi_update_nonce(unsigned int addr, int len, int hlread)
{
#if NRF_QSPI_HAS_XIP_ENC || NRF_QSPI_HAS_DMA_ENC

	NRF_QSPI_Type *p_reg = NRF_QSPI;

	if (!qspi_cfg->encryption) {
		return;
	}

	/* First transfer, high-latency read, or a non-sequential address
	 * all require a fresh NONCE2 value.
	 */
	if (nonce_last_addr == 0 || hlread || (nonce_last_addr + 4) != addr) {
		p_reg->DMA_ENC.NONCE2 = ++nonce_cnt;
	}

	nonce_last_addr = addr + len - 4;

#endif /*NRF_QSPI_HAS_XIP_ENC || NRF_QSPI_HAS_DMA_ENC*/
}
1234
/*
 * Log an error if the address, data pointer or length is not 32-bit
 * aligned. Diagnostic only — does not reject the transfer.
 */
void qspi_addr_check(unsigned int addr, const void *data, unsigned int len)
{
	const int addr_unaligned = (addr % 4) != 0;
	const int data_unaligned = (((unsigned int)data) % 4) != 0;
	const int len_unaligned = (len % 4) != 0;

	if (addr_unaligned || data_unaligned || len_unaligned) {
		LOG_ERR("%s : Unaligned address %x %x %d %x %x", __func__, addr,
			(unsigned int)data, addr_unaligned, data_unaligned,
			len_unaligned);
	}
}
1243
qspi_write(unsigned int addr,const void * data,int len)1244 int qspi_write(unsigned int addr, const void *data, int len)
1245 {
1246 int status;
1247
1248 qspi_addr_check(addr, data, len);
1249
1250 addr |= qspi_cfg->addrmask;
1251
1252 k_sem_take(&qspi_cfg->lock, K_FOREVER);
1253
1254 qspi_update_nonce(addr, len, 0);
1255
1256 status = qspi_nor_write(&qspi_perip, addr, data, len);
1257
1258 k_sem_give(&qspi_cfg->lock);
1259
1260 return status;
1261 }
1262
qspi_read(unsigned int addr,void * data,int len)1263 int qspi_read(unsigned int addr, void *data, int len)
1264 {
1265 int status;
1266
1267 qspi_addr_check(addr, data, len);
1268
1269 addr |= qspi_cfg->addrmask;
1270
1271 k_sem_take(&qspi_cfg->lock, K_FOREVER);
1272
1273 qspi_update_nonce(addr, len, 0);
1274
1275 status = qspi_nor_read(&qspi_perip, addr, data, len);
1276
1277 k_sem_give(&qspi_cfg->lock);
1278
1279 return status;
1280 }
1281
qspi_hl_readw(unsigned int addr,void * data)1282 int qspi_hl_readw(unsigned int addr, void *data)
1283 {
1284 int status;
1285 uint8_t *rxb = NULL;
1286 uint32_t len = 4;
1287
1288 len = len + (4 * qspi_cfg->qspi_slave_latency);
1289
1290 rxb = k_malloc(len);
1291
1292 if (rxb == NULL) {
1293 LOG_ERR("%s: ERROR ENOMEM line %d", __func__, __LINE__);
1294 return -ENOMEM;
1295 }
1296
1297 memset(rxb, 0, len);
1298
1299 k_sem_take(&qspi_cfg->lock, K_FOREVER);
1300
1301 qspi_update_nonce(addr, 4, 1);
1302
1303 status = qspi_nor_read(&qspi_perip, addr, rxb, len);
1304
1305 k_sem_give(&qspi_cfg->lock);
1306
1307 *(uint32_t *)data = *(uint32_t *)(rxb + (len - 4));
1308
1309 k_free(rxb);
1310
1311 return status;
1312 }
1313
/*
 * High-latency read of @p len bytes (a multiple of 4) from @p addr into
 * @p data, one 32-bit word at a time.
 *
 * Returns 0 on success or the first failing qspi_hl_readw() status.
 * Previously all per-word statuses were discarded and 0 was returned
 * unconditionally, hiding read failures from callers.
 */
int qspi_hl_read(unsigned int addr, void *data, int len)
{
	int count = 0;
	int status;

	qspi_addr_check(addr, data, len);

	while (count < (len / 4)) {
		status = qspi_hl_readw(addr + (4 * count), ((char *)data + (4 * count)));
		if (status != 0) {
			return status;
		}
		count++;
	}

	return 0;
}
1327
qspi_cmd_sleep_rpu(const struct device * dev)1328 int qspi_cmd_sleep_rpu(const struct device *dev)
1329 {
1330 uint8_t data = 0x0;
1331
1332 /* printf("TODO : %s:", __func__); */
1333 const struct qspi_buf tx_buf = {
1334 .buf = &data,
1335 .len = sizeof(data),
1336 };
1337
1338 const struct qspi_cmd cmd = {
1339 .op_code = 0x3f, /* 0x3f, //WRSR2(0x3F) WakeUP RPU. */
1340 .tx_buf = &tx_buf,
1341 };
1342
1343 int ret = qspi_device_init(dev);
1344
1345 if (ret == 0) {
1346 ret = qspi_send_cmd(dev, &cmd, false);
1347 }
1348
1349 qspi_device_uninit(dev);
1350
1351 if (ret < 0) {
1352 LOG_ERR("cmd_wakeup RPU failed: %d", ret);
1353 }
1354
1355 return ret;
1356 }
1357
1358 /* Encryption public API */
1359
/*
 * Enable QSPI DMA encryption with the given 128-bit @p key (nRF53 only).
 *
 * Copies the key into the stored encryption config, programs the nrfx
 * QSPI DMA-encryption registers, and sends the encryption-setup command
 * to the RPU. Returns 0 on success, -EALREADY if encryption is already
 * enabled, -EIO on any failure, or -ENOTSUP on unsupported SoCs.
 */
int qspi_enable_encryption(uint8_t *key)
{
#if defined(CONFIG_SOC_SERIES_NRF53X)
	int err = 0;
	int ret;

	if (qspi_cfg->encryption) {
		return -EALREADY;
	}

	ret = qspi_device_init(&qspi_perip);

	if (ret != 0) {
		LOG_ERR("qspi_device_init failed: %d", ret);
		return -EIO;
	}

	memcpy(qspi_cfg->p_cfg.key, key, 16);

	err = nrfx_qspi_dma_encrypt(&qspi_cfg->p_cfg);
	if (err != NRFX_SUCCESS) {
		LOG_ERR("nrfx_qspi_dma_encrypt failed: %d", err);
		ret = -EIO;
		/* Must uninit the device on failure: previously these error
		 * paths returned without balancing qspi_device_init().
		 */
		goto out;
	}

	err = qspi_cmd_encryption(&qspi_perip, &qspi_cfg->p_cfg);
	if (err != 0) {
		LOG_ERR("qspi_cmd_encryption failed: %d", err);
		ret = -EIO;
		goto out;
	}

	qspi_cfg->encryption = true;
	ret = 0;

out:
	qspi_device_uninit(&qspi_perip);

	return ret;
#else
	return -ENOTSUP;
#endif
}
1399