/*
 * Copyright (c) 2023 Nuvoton Technology Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nuvoton_npcx_fiu_qspi

#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/flash/npcx_flash_api_ex.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/dt-bindings/flash_controller/npcx_fiu_qspi.h>
#include <soc.h>

#include "flash_npcx_fiu_qspi.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(npcx_fiu_qspi, LOG_LEVEL_ERR);

/* Driver convenience defines */
#define HAL_INSTANCE(dev) \
	((struct fiu_reg *)((const struct npcx_qspi_fiu_config *)(dev)->config)->base)

/* Device config */
struct npcx_qspi_fiu_config {
	/* Flash interface unit base address */
	uintptr_t base;
	/* Clock configuration */
	struct npcx_clk_cfg clk_cfg;
	/* Enable 2 external SPI devices for direct read on QSPI bus */
	bool en_direct_access_2dev;
};

/* Device data */
struct npcx_qspi_fiu_data {
	/* Mutex of QSPI bus controller */
	struct k_sem lock_sem;
	/* Current device configuration on QSPI bus */
	const struct npcx_qspi_cfg *cur_cfg;
	/* Current Software controlled Chip-Select number */
	int sw_cs;
	/* Current QSPI bus operation */
	uint32_t operation;
};

/* NPCX SPI User Mode Access (UMA) functions */
static inline void qspi_npcx_uma_cs_level(const struct device *dev, uint8_t sw_cs, bool level)
{
	struct fiu_reg *const inst = HAL_INSTANCE(dev);

	/* Set chip select to high/low level */
	if (level) {
		inst->UMA_ECTS |= BIT(sw_cs);
	} else {
		inst->UMA_ECTS &= ~BIT(sw_cs);
	}
}

static inline void qspi_npcx_uma_write_byte(const struct device *dev, uint8_t data)
{
	struct fiu_reg *const inst = HAL_INSTANCE(dev);

	/* Set data to UMA_CODE and trigger UMA */
	inst->UMA_CODE = data;
	inst->UMA_CTS = UMA_CODE_CMD_WR_ONLY;
	/* EXEC_DONE is cleared automatically once the UMA transaction completes. */
	while (IS_BIT_SET(inst->UMA_CTS, NPCX_UMA_CTS_EXEC_DONE)) {
		continue;
	}
}

static inline void qspi_npcx_uma_read_byte(const struct device *dev, uint8_t *data)
{
	struct fiu_reg *const inst = HAL_INSTANCE(dev);

	/* Trigger UMA, then read the received data from UMA_DB0 */
	inst->UMA_CTS = UMA_CODE_RD_BYTE(1);
	while (IS_BIT_SET(inst->UMA_CTS, NPCX_UMA_CTS_EXEC_DONE)) {
		continue;
	}

	*data = inst->UMA_DB0;
}

/* NPCX SPI Direct Read Access (DRA)/User Mode Access (UMA) configuration functions */
static inline void qspi_npcx_config_uma_mode(const struct device *dev,
					     const struct npcx_qspi_cfg *qspi_cfg)
{
	struct fiu_reg *const inst = HAL_INSTANCE(dev);

	if ((qspi_cfg->flags & NPCX_QSPI_SEC_FLASH_SL) != 0) {
		inst->UMA_ECTS |= BIT(NPCX_UMA_ECTS_SEC_CS);
	} else {
		inst->UMA_ECTS &= ~BIT(NPCX_UMA_ECTS_SEC_CS);
	}
}

static inline void qspi_npcx_config_dra_4byte_mode(const struct device *dev,
						   const struct npcx_qspi_cfg *qspi_cfg)
{
#if defined(CONFIG_FLASH_NPCX_FIU_SUPP_DRA_4B_ADDR)
	struct fiu_reg *const inst = HAL_INSTANCE(dev);

#if defined(CONFIG_FLASH_NPCX_FIU_DRA_V1)
	if (qspi_cfg->enter_4ba != 0) {
		if ((qspi_cfg->flags & NPCX_QSPI_SEC_FLASH_SL) != 0) {
			inst->SPI1_DEV |= BIT(NPCX_SPI1_DEV_FOUR_BADDR_CS11);
		} else {
			inst->SPI1_DEV |= BIT(NPCX_SPI1_DEV_FOUR_BADDR_CS10);
		}
	} else {
		inst->SPI1_DEV &= ~(BIT(NPCX_SPI1_DEV_FOUR_BADDR_CS11) |
				    BIT(NPCX_SPI1_DEV_FOUR_BADDR_CS10));
	}
#elif defined(CONFIG_FLASH_NPCX_FIU_DRA_V2)
	if (qspi_cfg->enter_4ba != 0) {
		SET_FIELD(inst->SPI_DEV, NPCX_SPI_DEV_NADDRB, NPCX_DEV_NUM_ADDR_4BYTE);
	}
#endif
#endif /* CONFIG_FLASH_NPCX_FIU_SUPP_DRA_4B_ADDR */
}

static inline void qspi_npcx_config_dra_mode(const struct device *dev,
					     const struct npcx_qspi_cfg *qspi_cfg)
{
	struct fiu_reg *const inst = HAL_INSTANCE(dev);

	/* Select SPI device number for DRA mode in npcx4 series */
	if (IS_ENABLED(CONFIG_FLASH_NPCX_FIU_DRA_V2)) {
		int spi_dev_num = (qspi_cfg->flags & NPCX_QSPI_SEC_FLASH_SL) != 0 ? 1 : 0;

		SET_FIELD(inst->BURST_CFG, NPCX_BURST_CFG_SPI_DEV_SEL, spi_dev_num);
	}

	/* Enable quad mode of Direct Read Mode if needed */
	if (qspi_cfg->qer_type != JESD216_DW15_QER_NONE) {
		inst->RESP_CFG |= BIT(NPCX_RESP_CFG_QUAD_EN);
	} else {
		inst->RESP_CFG &= ~BIT(NPCX_RESP_CFG_QUAD_EN);
	}

	/* Selects the SPI read access type of Direct Read Access mode */
	SET_FIELD(inst->SPI_FL_CFG, NPCX_SPI_FL_CFG_RD_MODE, qspi_cfg->rd_mode);

	/* Enable/Disable 4 byte address mode for Direct Read Access (DRA) */
	qspi_npcx_config_dra_4byte_mode(dev, qspi_cfg);
}

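/* Apply extended operation flags; currently only internal flash write-protect is handled */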
static inline void qspi_npcx_fiu_set_operation(const struct device *dev, uint32_t operation)
{
	if ((operation & NPCX_EX_OP_INT_FLASH_WP) != 0) {
		npcx_pinctrl_flash_write_protect_set();
	}
}

/* NPCX specific QSPI-FIU controller functions */
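/*
 * Perform a UMA transaction: assert chip select, send the op-code, then optionally
 * send address bytes, write data, and read data as selected by the flags, and
 * finally de-assert chip select.
 */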
int qspi_npcx_fiu_uma_transceive(const struct device *dev, struct npcx_uma_cfg *cfg,
				 uint32_t flags)
{
	struct npcx_qspi_fiu_data *const data = dev->data;

	/* Check whether UMA transactions are currently permitted */
	if ((data->operation & NPCX_EX_OP_LOCK_UMA) != 0) {
		return -EPERM;
	}

	/* Assert chip select */
	qspi_npcx_uma_cs_level(dev, data->sw_cs, false);

	/* Transmit op-code first */
	qspi_npcx_uma_write_byte(dev, cfg->opcode);

	if ((flags & NPCX_UMA_ACCESS_ADDR) != 0) {
		/* Send a 4-byte address if the device has entered 4-byte address mode;
		 * otherwise skip the most significant byte and send a 3-byte address.
		 */
		const int addr_start = (data->cur_cfg->enter_4ba != 0) ? 0 : 1;

		for (size_t i = addr_start; i < 4; i++) {
			LOG_DBG("addr %d, %02x", i, cfg->addr.u8[i]);
			qspi_npcx_uma_write_byte(dev, cfg->addr.u8[i]);
		}
	}

	if ((flags & NPCX_UMA_ACCESS_WRITE) != 0) {
		if (cfg->tx_buf == NULL) {
			return -EINVAL;
		}
		for (size_t i = 0; i < cfg->tx_count; i++) {
			qspi_npcx_uma_write_byte(dev, cfg->tx_buf[i]);
		}
	}

	if ((flags & NPCX_UMA_ACCESS_READ) != 0) {
		if (cfg->rx_buf == NULL) {
			return -EINVAL;
		}
		for (size_t i = 0; i < cfg->rx_count; i++) {
			qspi_npcx_uma_read_byte(dev, cfg->rx_buf + i);
		}
	}

	/* De-assert chip select */
	qspi_npcx_uma_cs_level(dev, data->sw_cs, true);

	return 0;
}

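/*
 * Lock the QSPI bus, then apply the given device configuration and bus operation
 * flags before the caller starts a transaction.
 */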
void qspi_npcx_fiu_mutex_lock_configure(const struct device *dev,
					const struct npcx_qspi_cfg *cfg,
					const uint32_t operation)
{
	struct npcx_qspi_fiu_data *const data = dev->data;

	k_sem_take(&data->lock_sem, K_FOREVER);

	/* If the current device differs from the previous one, configure it */
	if (data->cur_cfg != cfg) {
		data->cur_cfg = cfg;

		/* Apply pin-muxing and tri-state */
		pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);

		/* Configure User Mode Access (UMA) settings */
		qspi_npcx_config_uma_mode(dev, cfg);

		/* Configure for Direct Read Access (DRA) settings */
		qspi_npcx_config_dra_mode(dev, cfg);

		/* Save SW CS bit used in UMA mode */
		data->sw_cs = find_lsb_set(cfg->flags & NPCX_QSPI_SW_CS_MASK) - 1;
	}

	/* Set QSPI bus operation */
	if (data->operation != operation) {
		qspi_npcx_fiu_set_operation(dev, operation);
		data->operation = operation;
	}
}

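/* Release the QSPI bus lock taken by qspi_npcx_fiu_mutex_lock_configure() */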
void qspi_npcx_fiu_mutex_unlock(const struct device *dev)
{
	struct npcx_qspi_fiu_data *const data = dev->data;

	k_sem_give(&data->lock_sem);
}

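/*
 * Controller initialization: turn on the FIU clock, initialize the bus mutex, and
 * optionally enable direct access for two external SPI devices.
 */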
static int qspi_npcx_fiu_init(const struct device *dev)
{
	const struct npcx_qspi_fiu_config *const config = dev->config;
	struct npcx_qspi_fiu_data *const data = dev->data;
	const struct device *const clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE);
	int ret;

	if (!device_is_ready(clk_dev)) {
		LOG_ERR("%s device not ready", clk_dev->name);
		return -ENODEV;
	}

	/* Turn on the FIU device clock first */
	ret = clock_control_on(clk_dev,
			       (clock_control_subsys_t)&config->clk_cfg);
	if (ret < 0) {
		LOG_ERR("Turn on FIU clock fail %d", ret);
		return ret;
	}

	/* Initialize mutex for the QSPI controller */
	k_sem_init(&data->lock_sem, 1, 1);

	/* Enable direct access for 2 external SPI devices */
	if (config->en_direct_access_2dev) {
#if defined(CONFIG_FLASH_NPCX_FIU_SUPP_DRA_2_DEV)
		struct fiu_reg *const inst = HAL_INSTANCE(dev);

		inst->FIU_EXT_CFG |= BIT(NPCX_FIU_EXT_CFG_SPI1_2DEV);
#endif
	}

	return 0;
}

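/* Instantiate a QSPI-FIU controller for each enabled devicetree instance */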
#define NPCX_SPI_FIU_INIT(n)                                                      \
	static const struct npcx_qspi_fiu_config npcx_qspi_fiu_config_##n = {    \
		.base = DT_INST_REG_ADDR(n),                                      \
		.clk_cfg = NPCX_DT_CLK_CFG_ITEM(n),                               \
		.en_direct_access_2dev = DT_INST_PROP(n, en_direct_access_2dev), \
	};                                                                        \
	static struct npcx_qspi_fiu_data npcx_qspi_fiu_data_##n;                 \
	DEVICE_DT_INST_DEFINE(n, qspi_npcx_fiu_init, NULL,                       \
			      &npcx_qspi_fiu_data_##n, &npcx_qspi_fiu_config_##n, \
			      PRE_KERNEL_1, CONFIG_FLASH_INIT_PRIORITY, NULL);

DT_INST_FOREACH_STATUS_OKAY(NPCX_SPI_FIU_INIT)