/*
 * Copyright (c) 2023 Nuvoton Technology Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nuvoton_npcx_fiu_nor

#include <zephyr/kernel.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/drivers/flash/npcx_flash_api_ex.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/dt-bindings/flash_controller/npcx_fiu_qspi.h>
#include <soc.h>
#ifdef CONFIG_USERSPACE
#include <zephyr/syscall.h>
#include <zephyr/internal/syscall_handler.h>
#endif

#include "flash_npcx_fiu_qspi.h"
#include "spi_nor.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(flash_npcx_fiu_nor, CONFIG_FLASH_LOG_LEVEL);

#define BLOCK_64K_SIZE KB(64)
#define BLOCK_4K_SIZE KB(4)

/* Device config */
struct flash_npcx_nor_config {
	/* QSPI bus device for mutex control and bus configuration */
	const struct device *qspi_bus;
	/* Mapped address for flash read via direct access */
	uintptr_t mapped_addr;
	/* Size of nor device in bytes, from size property */
	uint32_t flash_size;
	/* Maximum chip erase time-out in ms */
	uint32_t max_timeout;
	/* SPI Nor device configuration on QSPI bus */
	struct npcx_qspi_cfg qspi_cfg;
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	struct flash_pages_layout layout;
#endif
};

/* Device data */
struct flash_npcx_nor_data {
	/* Specific control operation for Quad-SPI Nor Flash */
	uint32_t operation;
};

static const struct flash_parameters flash_npcx_parameters = {
	.write_block_size = 1,
	.erase_value = 0xff,
};

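/*
 * Map the optional quad-enable-requirements devicetree property to the
 * corresponding JESD216 DW15 QER value; default to "none" when the property
 * is absent.
 */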
#define DT_INST_QUAD_EN_PROP_OR(inst)                                             \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, quad_enable_requirements),        \
		    (_CONCAT(JESD216_DW15_QER_VAL_,                                \
			     DT_INST_STRING_TOKEN(inst, quad_enable_requirements))), \
		    ((JESD216_DW15_QER_VAL_NONE)))

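/* Check that the access [addr, addr + size) lies entirely within the given region */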
static inline bool is_within_region(off_t addr, size_t size, off_t region_start,
				    size_t region_size)
{
	return (addr >= region_start &&
		(addr < (region_start + region_size)) &&
		((addr + size) <= (region_start + region_size)));
}

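/*
 * Run a single UMA transaction: lock the shared QSPI bus, apply this device's
 * bus configuration if needed, execute the transfer described by cfg and
 * flags, then release the bus.
 */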
static int flash_npcx_uma_transceive(const struct device *dev, struct npcx_uma_cfg *cfg,
				     uint32_t flags)
{
	const struct flash_npcx_nor_config *config = dev->config;
	struct flash_npcx_nor_data *data = dev->data;
	int ret;

	/* Lock SPI bus and configure it if needed */
	qspi_npcx_fiu_mutex_lock_configure(config->qspi_bus, &config->qspi_cfg,
					   data->operation);

	/* Execute UMA transaction */
	ret = qspi_npcx_fiu_uma_transceive(config->qspi_bus, cfg, flags);

	/* Unlock SPI bus */
	qspi_npcx_fiu_mutex_unlock(config->qspi_bus);

	return ret;
}

/* NPCX UMA functions for SPI NOR flash */
static int flash_npcx_uma_cmd_only(const struct device *dev, uint8_t opcode)
{
	struct npcx_uma_cfg cfg = { .opcode = opcode};

	return flash_npcx_uma_transceive(dev, &cfg, 0); /* opcode only */
}

static int flash_npcx_uma_cmd_by_addr(const struct device *dev, uint8_t opcode,
				      uint32_t addr)
{
	struct npcx_uma_cfg cfg = { .opcode = opcode};

	cfg.addr.u32 = sys_cpu_to_be32(addr);
	return flash_npcx_uma_transceive(dev, &cfg, NPCX_UMA_ACCESS_ADDR);
}

static int flash_npcx_uma_read(const struct device *dev, uint8_t opcode,
			       uint8_t *dst, const size_t size)
{
	struct npcx_uma_cfg cfg = { .opcode = opcode,
				    .rx_buf = dst,
				    .rx_count = size};

	return flash_npcx_uma_transceive(dev, &cfg, NPCX_UMA_ACCESS_READ);
}

static int flash_npcx_uma_write(const struct device *dev, uint8_t opcode,
				uint8_t *src, const size_t size)
{
	struct npcx_uma_cfg cfg = { .opcode = opcode,
				    .tx_buf = src,
				    .tx_count = size};

	return flash_npcx_uma_transceive(dev, &cfg, NPCX_UMA_ACCESS_WRITE);
}

static int flash_npcx_uma_write_by_addr(const struct device *dev, uint8_t opcode,
					uint8_t *src, const size_t size, uint32_t addr)
{
	struct npcx_uma_cfg cfg = { .opcode = opcode,
				    .tx_buf = src,
				    .tx_count = size};

	cfg.addr.u32 = sys_cpu_to_be32(addr);
	return flash_npcx_uma_transceive(dev, &cfg, NPCX_UMA_ACCESS_WRITE |
					 NPCX_UMA_ACCESS_ADDR);
}

/* Local SPI NOR flash functions */
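/*
 * Poll the WIP (write in progress) bit in the status register until the
 * device is ready, or return -EBUSY once max_timeout ms have elapsed.
 */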
static int flash_npcx_nor_wait_until_ready(const struct device *dev)
{
	int ret;
	uint8_t reg;
	const struct flash_npcx_nor_config *config = dev->config;
	int64_t st = k_uptime_get();

	do {
		ret = flash_npcx_uma_read(dev, SPI_NOR_CMD_RDSR, &reg, sizeof(reg));
		if (ret != 0) {
			return ret;
		} else if ((reg & SPI_NOR_WIP_BIT) == 0) {
			return 0;
		}

	} while ((k_uptime_get() - st) < config->max_timeout);

	return -EBUSY;
}

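/* Read status registers 1 and 2 into a two-byte buffer */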
static int flash_npcx_nor_read_status_regs(const struct device *dev, uint8_t *sts_reg)
{
	int ret = flash_npcx_uma_read(dev, SPI_NOR_CMD_RDSR, sts_reg, 1);

	if (ret != 0) {
		return ret;
	}
	return flash_npcx_uma_read(dev, SPI_NOR_CMD_RDSR2, sts_reg + 1, 1);
}

static int flash_npcx_nor_write_status_regs(const struct device *dev, uint8_t *sts_reg)
{
	int ret;

	ret = flash_npcx_uma_cmd_only(dev, SPI_NOR_CMD_WREN);
	if (ret != 0) {
		return ret;
	}

	ret = flash_npcx_uma_write(dev, SPI_NOR_CMD_WRSR, sts_reg, 2);
	if (ret != 0) {
		return ret;
	}

	return flash_npcx_nor_wait_until_ready(dev);
}

/* Flash API functions */
#if defined(CONFIG_FLASH_JESD216_API)
static int flash_npcx_nor_read_jedec_id(const struct device *dev, uint8_t *id)
{
	if (id == NULL) {
		return -EINVAL;
	}

	return flash_npcx_uma_read(dev, SPI_NOR_CMD_RDID, id, SPI_NOR_MAX_ID_LEN);
}

static int flash_npcx_nor_read_sfdp(const struct device *dev, off_t addr,
				    void *data, size_t size)
{
	uint8_t sfdp_addr[4];
	struct npcx_uma_cfg cfg = { .opcode = JESD216_CMD_READ_SFDP,
				    .tx_buf = sfdp_addr,
				    .tx_count = 4,
				    .rx_buf = data,
				    .rx_count = size};

	if (data == NULL) {
		return -EINVAL;
	}

	/* CMD_READ_SFDP needs a 24-bit address followed by a dummy byte */
	sfdp_addr[0] = (addr >> 16) & 0xff;
	sfdp_addr[1] = (addr >> 8) & 0xff;
	sfdp_addr[2] = addr & 0xff;
	return flash_npcx_uma_transceive(dev, &cfg, NPCX_UMA_ACCESS_WRITE |
					 NPCX_UMA_ACCESS_READ);
}
#endif /* CONFIG_FLASH_JESD216_API */

#if defined(CONFIG_FLASH_PAGE_LAYOUT)
static void flash_npcx_nor_pages_layout(const struct device *dev,
					const struct flash_pages_layout **layout,
					size_t *layout_size)
{
	const struct flash_npcx_nor_config *config = dev->config;

	*layout = &config->layout;
	*layout_size = 1;
}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */

static int flash_npcx_nor_read(const struct device *dev, off_t addr,
			       void *data, size_t size)
{
	const struct flash_npcx_nor_config *config = dev->config;
	struct flash_npcx_nor_data *dev_data = dev->data;

	/* Out of the region of nor flash device? */
	if (!is_within_region(addr, size, 0, config->flash_size)) {
		return -EINVAL;
	}

	/* The SPI bus must also be locked/unlocked around DRA reads */
	qspi_npcx_fiu_mutex_lock_configure(config->qspi_bus, &config->qspi_cfg,
					   dev_data->operation);

	/* Trigger Direct Read Access (DRA) by reading from the memory-mapped address */
	memcpy(data, (void *)(config->mapped_addr + addr), size);

	qspi_npcx_fiu_mutex_unlock(config->qspi_bus);

	return 0;
}

static int flash_npcx_nor_erase(const struct device *dev, off_t addr, size_t size)
{
	const struct flash_npcx_nor_config *config = dev->config;
	int ret = 0;

	/* Out of the region of nor flash device? */
	if (!is_within_region(addr, size, 0, config->flash_size)) {
		LOG_ERR("Addr %ld, size %zu are out of range", addr, size);
		return -EINVAL;
	}

	/* Address must be sector-aligned */
	if (!SPI_NOR_IS_SECTOR_ALIGNED(addr)) {
		LOG_ERR("Addr %ld is not sector-aligned", addr);
		return -EINVAL;
	}

	/* Size must be a multiple of sectors */
	if ((size % BLOCK_4K_SIZE) != 0) {
		LOG_ERR("Size %zu is not a multiple of sectors", size);
		return -EINVAL;
	}

	/* Select erase opcode by size */
	if (size == config->flash_size) {
		flash_npcx_uma_cmd_only(dev, SPI_NOR_CMD_WREN);
		/* Send chip erase command */
		flash_npcx_uma_cmd_only(dev, SPI_NOR_CMD_CE);
		return flash_npcx_nor_wait_until_ready(dev);
	}

	while (size > 0) {
		flash_npcx_uma_cmd_only(dev, SPI_NOR_CMD_WREN);
		/* Send block/sector erase command with addr */
		if ((size >= BLOCK_64K_SIZE) && SPI_NOR_IS_64K_ALIGNED(addr)) {
			flash_npcx_uma_cmd_by_addr(dev, SPI_NOR_CMD_BE, addr);
			addr += BLOCK_64K_SIZE;
			size -= BLOCK_64K_SIZE;
		} else {
			flash_npcx_uma_cmd_by_addr(dev, SPI_NOR_CMD_SE, addr);
			addr += BLOCK_4K_SIZE;
			size -= BLOCK_4K_SIZE;
		}
		ret = flash_npcx_nor_wait_until_ready(dev);
		if (ret != 0) {
			break;
		}
	}

	return ret;
}

static int flash_npcx_nor_write(const struct device *dev, off_t addr,
				const void *data, size_t size)
{
	const struct flash_npcx_nor_config *config = dev->config;
	uint8_t *tx_buf = (uint8_t *)data;
	int ret = 0;
	size_t sz_write;

	/* Out of the region of nor flash device? */
	if (!is_within_region(addr, size, 0, config->flash_size)) {
		return -EINVAL;
	}

	/* Don't write more than a page. */
	if (size > SPI_NOR_PAGE_SIZE) {
		sz_write = SPI_NOR_PAGE_SIZE;
	} else {
		sz_write = size;
	}

	/*
	 * Trim the first write so it does not cross a page boundary; the
	 * following writes then start on a page boundary.
	 */
	if (((addr + sz_write - 1U) / SPI_NOR_PAGE_SIZE) != (addr / SPI_NOR_PAGE_SIZE)) {
		sz_write -= (addr + sz_write) & (SPI_NOR_PAGE_SIZE - 1);
	}

	while (size > 0) {
		/* Start a page program operation */
		flash_npcx_uma_cmd_only(dev, SPI_NOR_CMD_WREN);
		ret = flash_npcx_uma_write_by_addr(dev, SPI_NOR_CMD_PP, tx_buf,
						   sz_write, addr);
		if (ret != 0) {
			break;
		}

		/* Wait for the write to complete */
		ret = flash_npcx_nor_wait_until_ready(dev);
		if (ret != 0) {
			break;
		}

		size -= sz_write;
		tx_buf += sz_write;
		addr += sz_write;

		if (size > SPI_NOR_PAGE_SIZE) {
			sz_write = SPI_NOR_PAGE_SIZE;
		} else {
			sz_write = size;
		}
	}

	return ret;
}

static const struct flash_parameters *
flash_npcx_nor_get_parameters(const struct device *dev)
{
	ARG_UNUSED(dev);

	return &flash_npcx_parameters;
}

#ifdef CONFIG_FLASH_EX_OP_ENABLED
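/*
 * Execute a caller-supplied UMA transaction (opcode with optional address,
 * TX and RX phases) described by op_in; any received data is returned via
 * op_out.
 */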
static int flash_npcx_nor_ex_exec_uma(const struct device *dev,
				      const struct npcx_ex_ops_uma_in *op_in,
				      const struct npcx_ex_ops_uma_out *op_out)
{
	int flag = 0;
	struct npcx_uma_cfg cfg;

	if (op_in == NULL) {
		return -EINVAL;
	}

	/* Organize a UMA transaction */
	cfg.opcode = op_in->opcode;
	if (op_in->tx_count != 0) {
		cfg.tx_buf = op_in->tx_buf;
		cfg.tx_count = op_in->tx_count;
		flag |= NPCX_UMA_ACCESS_WRITE;
	}

	if (op_in->addr_count != 0) {
		cfg.addr.u32 = sys_cpu_to_be32(op_in->addr);
		flag |= NPCX_UMA_ACCESS_ADDR;
	}

	if (op_out != NULL && op_in->rx_count != 0) {
		cfg.rx_buf = op_out->rx_buf;
		cfg.rx_count = op_in->rx_count;
		flag |= NPCX_UMA_ACCESS_READ;
	}

	return flash_npcx_uma_transceive(dev, &cfg, flag);
}

static int flash_npcx_nor_ex_set_spi_spec(const struct device *dev,
					  const struct npcx_ex_ops_qspi_oper_in *op_in)
{
	struct flash_npcx_nor_data *data = dev->data;

	/* Cannot disable write protection of internal flash */
	if ((data->operation & NPCX_EX_OP_INT_FLASH_WP) != 0) {
		if ((op_in->mask & NPCX_EX_OP_INT_FLASH_WP) != 0 && !op_in->enable) {
			return -EINVAL;
		}
	}

	if (op_in->enable) {
		data->operation |= op_in->mask;
	} else {
		data->operation &= ~op_in->mask;
	}

	return 0;
}

static int flash_npcx_nor_ex_get_spi_spec(const struct device *dev,
					  struct npcx_ex_ops_qspi_oper_out *op_out)
{
	struct flash_npcx_nor_data *data = dev->data;

	op_out->oper = data->operation;
	return 0;
}

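/*
 * Dispatch Nuvoton-specific extended operations. When invoked from user mode,
 * the input and output structures are copied between user and kernel memory
 * around the operation.
 */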
static int flash_npcx_nor_ex_op(const struct device *dev, uint16_t code,
				const uintptr_t in, void *out)
{
#ifdef CONFIG_USERSPACE
	bool syscall_trap = z_syscall_trap();
#endif
	int ret;

	switch (code) {
	case FLASH_NPCX_EX_OP_EXEC_UMA:
	{
		struct npcx_ex_ops_uma_in *op_in = (struct npcx_ex_ops_uma_in *)in;
		struct npcx_ex_ops_uma_out *op_out = (struct npcx_ex_ops_uma_out *)out;
#ifdef CONFIG_USERSPACE
		struct npcx_ex_ops_uma_in in_copy;
		struct npcx_ex_ops_uma_out out_copy;

		if (syscall_trap) {
			K_OOPS(k_usermode_from_copy(&in_copy, op_in, sizeof(in_copy)));
			op_in = &in_copy;
			op_out = &out_copy;
		}
#endif

		ret = flash_npcx_nor_ex_exec_uma(dev, op_in, op_out);
#ifdef CONFIG_USERSPACE
		if (ret == 0 && syscall_trap) {
			K_OOPS(k_usermode_to_copy(out, op_out, sizeof(out_copy)));
		}
#endif
		break;
	}
	case FLASH_NPCX_EX_OP_SET_QSPI_OPER:
	{
		struct npcx_ex_ops_qspi_oper_in *op_in = (struct npcx_ex_ops_qspi_oper_in *)in;
#ifdef CONFIG_USERSPACE
		struct npcx_ex_ops_qspi_oper_in in_copy;

		if (syscall_trap) {
			K_OOPS(k_usermode_from_copy(&in_copy, op_in, sizeof(in_copy)));
			op_in = &in_copy;
		}
#endif
		ret = flash_npcx_nor_ex_set_spi_spec(dev, op_in);
		break;
	}
	case FLASH_NPCX_EX_OP_GET_QSPI_OPER:
	{
		struct npcx_ex_ops_qspi_oper_out *op_out =
			(struct npcx_ex_ops_qspi_oper_out *)out;
#ifdef CONFIG_USERSPACE
		struct npcx_ex_ops_qspi_oper_out out_copy;

		if (syscall_trap) {
			op_out = &out_copy;
		}
#endif
		ret = flash_npcx_nor_ex_get_spi_spec(dev, op_out);
#ifdef CONFIG_USERSPACE
		if (ret == 0 && syscall_trap) {
			K_OOPS(k_usermode_to_copy(out, op_out, sizeof(out_copy)));
		}
#endif
		break;
	}
	default:
		ret = -ENOTSUP;
		break;
	}

	return ret;
}
#endif

static const struct flash_driver_api flash_npcx_nor_driver_api = {
	.read = flash_npcx_nor_read,
	.write = flash_npcx_nor_write,
	.erase = flash_npcx_nor_erase,
	.get_parameters = flash_npcx_nor_get_parameters,
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	.page_layout = flash_npcx_nor_pages_layout,
#endif
#if defined(CONFIG_FLASH_JESD216_API)
	.sfdp_read = flash_npcx_nor_read_sfdp,
	.read_jedec_id = flash_npcx_nor_read_jedec_id,
#endif
#ifdef CONFIG_FLASH_EX_OP_ENABLED
	.ex_op = flash_npcx_nor_ex_op,
#endif
};

static int flash_npcx_nor_init(const struct device *dev)
{
	const struct flash_npcx_nor_config *config = dev->config;
	int ret;

	if (!IS_ENABLED(CONFIG_FLASH_NPCX_FIU_NOR_INIT)) {
		return 0;
	}

	/* Enable quad access of spi NOR flash */
	if (config->qspi_cfg.qer_type != JESD216_DW15_QER_NONE) {
		uint8_t qe_idx, qe_bit, sts_reg[2];
		/* Read status registers first */
		ret = flash_npcx_nor_read_status_regs(dev, sts_reg);
		if (ret != 0) {
			LOG_ERR("Enable quad access: read reg failed %d!", ret);
			return ret;
		}
		switch (config->qspi_cfg.qer_type) {
		case JESD216_DW15_QER_S1B6:
			qe_idx = 1;
			qe_bit = 6;
			break;
		case JESD216_DW15_QER_S2B1v1:
			__fallthrough;
		case JESD216_DW15_QER_S2B1v4:
			__fallthrough;
		case JESD216_DW15_QER_S2B1v5:
			qe_idx = 2;
			qe_bit = 1;
			break;
		default:
			return -ENOTSUP;
		}
		/* Set QE bit in status register */
		sts_reg[qe_idx - 1] |= BIT(qe_bit);
		ret = flash_npcx_nor_write_status_regs(dev, sts_reg);
		if (ret != 0) {
			LOG_ERR("Enable quad access: write reg failed %d!", ret);
			return ret;
		}
	}

	/* Enable 4-byte address of spi NOR flash */
	if (config->qspi_cfg.enter_4ba != 0) {
		bool wr_en = (config->qspi_cfg.enter_4ba & 0x02) != 0;

		if (wr_en) {
			ret = flash_npcx_uma_cmd_only(dev, SPI_NOR_CMD_WREN);
			if (ret != 0) {
				LOG_ERR("Enable 4byte addr: WREN failed %d!", ret);
				return ret;
			}
		}
		ret = flash_npcx_uma_cmd_only(dev, SPI_NOR_CMD_4BA);
		if (ret != 0) {
			LOG_ERR("Enable 4byte addr: 4BA failed %d!", ret);
			return ret;
		}
	}

	return 0;
}

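/*
 * Instantiate a flash device for each enabled nuvoton,npcx-fiu-nor node. The
 * devicetree "size" property is given in bits, hence the division by 8 when
 * deriving the flash size and page count.
 */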
#define NPCX_FLASH_NOR_INIT(n)                                                     \
	BUILD_ASSERT(DT_INST_QUAD_EN_PROP_OR(n) == JESD216_DW15_QER_NONE ||        \
		     DT_INST_STRING_TOKEN(n, rd_mode) == NPCX_RD_MODE_FAST_DUAL,   \
		     "Fast Dual IO read must be selected in Quad mode");           \
	PINCTRL_DT_INST_DEFINE(n);                                                  \
	static const struct flash_npcx_nor_config flash_npcx_nor_config_##n = {    \
		.qspi_bus = DEVICE_DT_GET(DT_PARENT(DT_DRV_INST(n))),              \
		.mapped_addr = DT_INST_PROP(n, mapped_addr),                        \
		.flash_size = DT_INST_PROP(n, size) / 8,                            \
		.max_timeout = DT_INST_PROP(n, max_timeout),                        \
		.qspi_cfg = {                                                       \
			.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),                  \
			.flags = DT_INST_PROP(n, qspi_flags),                       \
			.enter_4ba = DT_INST_PROP_OR(n, enter_4byte_addr, 0),       \
			.qer_type = DT_INST_QUAD_EN_PROP_OR(n),                     \
			.rd_mode = DT_INST_STRING_TOKEN(n, rd_mode),                \
		},                                                                  \
		IF_ENABLED(CONFIG_FLASH_PAGE_LAYOUT, (                              \
			.layout = {                                                 \
				.pages_count = DT_INST_PROP(n, size) /              \
					       (8 * SPI_NOR_PAGE_SIZE),             \
				.pages_size = SPI_NOR_PAGE_SIZE,                    \
			},))                                                        \
	};                                                                          \
	static struct flash_npcx_nor_data flash_npcx_nor_data_##n;                 \
	DEVICE_DT_INST_DEFINE(n, flash_npcx_nor_init, NULL,                        \
			      &flash_npcx_nor_data_##n, &flash_npcx_nor_config_##n, \
			      POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY,             \
			      &flash_npcx_nor_driver_api);

DT_INST_FOREACH_STATUS_OKAY(NPCX_FLASH_NOR_INIT)