/*
 * Copyright 2020-2023 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nxp_imx_flexspi

#include <zephyr/logging/log.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/pm/device.h>
#include <soc.h>

#include "memc_mcux_flexspi.h"

/*
 * NOTE: If CONFIG_FLASH_MCUX_FLEXSPI_XIP is selected, any external functions
 * called while interacting with the FlexSPI MUST be relocated to SRAM or ITCM
 * at runtime, so that the chip does not access the FlexSPI to fetch program
 * instructions while it is being written to.
 */
#if defined(CONFIG_FLASH_MCUX_FLEXSPI_XIP) && (CONFIG_MEMC_LOG_LEVEL > 0)
#warning "Enabling memc driver logging and XIP mode simultaneously can cause \
	read-while-write hazards. This configuration is not recommended."
#endif
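
/*
 * A hedged example of such relocation (not the exact recipe used by in-tree
 * drivers): a driver layered on top of this module can place its own code in
 * RAM from its CMakeLists.txt, e.g.
 *
 *	if(CONFIG_FLASH_MCUX_FLEXSPI_XIP)
 *	  zephyr_code_relocate(FILES my_flexspi_flash_driver.c LOCATION RAM)
 *	endif()
 *
 * where "my_flexspi_flash_driver.c" is a placeholder file name.
 */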

#define FLEXSPI_MAX_LUT 64U

LOG_MODULE_REGISTER(memc_flexspi, CONFIG_MEMC_LOG_LEVEL);

struct memc_flexspi_buf_cfg {
	uint16_t prefetch;
	uint16_t priority;
	uint16_t master_id;
	uint16_t buf_size;
} __packed;

/* Structure tracking LUT offset and usage for each port */
struct port_lut {
	uint8_t lut_offset;
	uint8_t lut_used;
};

/* FlexSPI device data should be stored in RAM to avoid read-while-write hazards */
struct memc_flexspi_data {
	FLEXSPI_Type *base;
	uint8_t *ahb_base;
	bool xip;
	bool ahb_bufferable;
	bool ahb_cacheable;
	bool ahb_prefetch;
	bool ahb_read_addr_opt;
	bool combination_mode;
	bool sck_differential_clock;
	flexspi_read_sample_clock_t rx_sample_clock;
	const struct pinctrl_dev_config *pincfg;
	size_t size[kFLEXSPI_PortCount];
	struct port_lut port_luts[kFLEXSPI_PortCount];
	struct memc_flexspi_buf_cfg *buf_cfg;
	uint8_t buf_cfg_cnt;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
};

void memc_flexspi_wait_bus_idle(const struct device *dev)
{
	struct memc_flexspi_data *data = dev->data;

	while (false == FLEXSPI_GetBusIdleStatus(data->base)) {
	}
}

bool memc_flexspi_is_running_xip(const struct device *dev)
{
	struct memc_flexspi_data *data = dev->data;

	return data->xip;
}

int memc_flexspi_update_clock(const struct device *dev,
			      flexspi_device_config_t *device_config,
			      flexspi_port_t port, uint32_t freq_hz)
{
	struct memc_flexspi_data *data = dev->data;
	uint32_t rate;
	uint32_t key;
	int ret;

	/* To reclock the FlexSPI, we should:
	 * - disable the module
	 * - set the new clock
	 * - reenable the module
	 * - reset the module
	 * We CANNOT XIP at any point during this process
	 */
	key = irq_lock();
	memc_flexspi_wait_bus_idle(dev);

	ret = clock_control_set_rate(data->clock_dev, data->clock_subsys,
				     (clock_control_subsys_rate_t)freq_hz);
	if (ret < 0) {
		irq_unlock(key);
		return ret;
	}

	/*
	 * We need to update the DLL value before we call clock_control_get_rate,
	 * because that call will cause XIP (flash reads) to occur. Although the
	 * true flash clock is not known, assume the set_rate function programmed
	 * a value close to what we requested.
	 */
	device_config->flexspiRootClk = freq_hz;
	FLEXSPI_UpdateDllValue(data->base, device_config, port);
	memc_flexspi_reset(dev);

	memc_flexspi_wait_bus_idle(dev);
	ret = clock_control_get_rate(data->clock_dev, data->clock_subsys, &rate);
	if (ret < 0) {
		irq_unlock(key);
		return ret;
	}

	device_config->flexspiRootClk = rate;
	FLEXSPI_UpdateDllValue(data->base, device_config, port);

	memc_flexspi_reset(dev);

	irq_unlock(key);

	return 0;
}

int memc_flexspi_set_device_config(const struct device *dev,
				   const flexspi_device_config_t *device_config,
				   const uint32_t *lut_array,
				   uint8_t lut_count,
				   flexspi_port_t port)
{
	flexspi_device_config_t tmp_config;
	uint32_t tmp_lut[FLEXSPI_MAX_LUT];
	struct memc_flexspi_data *data = dev->data;
	const uint32_t *lut_ptr = lut_array;
	uint8_t lut_used = 0U;
	unsigned int key = 0;

	if (port >= kFLEXSPI_PortCount) {
		LOG_ERR("Invalid port number");
		return -EINVAL;
	}

	if (data->port_luts[port].lut_used < lut_count) {
		/* We cannot reuse the existing LUT slot;
		 * check if the new LUT table will fit into the remaining LUT slots
		 */
		for (uint8_t i = 0; i < kFLEXSPI_PortCount; i++) {
			lut_used += data->port_luts[i].lut_used;
		}

		if ((lut_used + lut_count) > FLEXSPI_MAX_LUT) {
			return -ENOBUFS;
		}
	}

	data->size[port] = device_config->flashSize * KB(1);

	if (memc_flexspi_is_running_xip(dev)) {
		/* We need to avoid flash access while configuring the FlexSPI.
		 * To do this, we copy the LUT array into stack-allocated
		 * temporary memory
		 */
		memcpy(tmp_lut, lut_array, lut_count * MEMC_FLEXSPI_CMD_SIZE);
		lut_ptr = tmp_lut;
	}

	memcpy(&tmp_config, device_config, sizeof(tmp_config));
	/* Update FlexSPI AWRSEQID and ARDSEQID values based on where the LUT
	 * array will actually be loaded.
	 */
	if (data->port_luts[port].lut_used < lut_count) {
		/* Update LUT offset with the new value */
		data->port_luts[port].lut_offset = lut_used;
	}
	data->port_luts[port].lut_used = lut_count;
	tmp_config.ARDSeqIndex += data->port_luts[port].lut_offset;
	tmp_config.AWRSeqIndex += data->port_luts[port].lut_offset;

	/* Lock IRQs before reconfiguring the FlexSPI, to prevent XIP */
	key = irq_lock();

	FLEXSPI_SetFlashConfig(data->base, &tmp_config, port);
	FLEXSPI_UpdateLUT(data->base, data->port_luts[port].lut_offset,
			  lut_ptr, lut_count);
	irq_unlock(key);

	return 0;
}
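
/*
 * Usage sketch (a hedged illustration, not part of this driver): a child
 * flash driver typically registers its LUT and device configuration once at
 * init time, using sequence indices relative to its own LUT array. Names such
 * as "nor_lut", "nor_config" and "controller" below are placeholders.
 *
 *	static const uint32_t nor_lut[][4] = {
 *		[0] = {
 *			FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, 0x03,
 *					kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_1PAD, 24),
 *			FLEXSPI_LUT_SEQ(kFLEXSPI_Command_READ_SDR, kFLEXSPI_1PAD, 0x04,
 *					kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0),
 *		},
 *	};
 *
 *	ret = memc_flexspi_set_device_config(controller, &nor_config,
 *					     (const uint32_t *)nor_lut,
 *					     sizeof(nor_lut) / MEMC_FLEXSPI_CMD_SIZE,
 *					     kFLEXSPI_PortA1);
 */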

int memc_flexspi_reset(const struct device *dev)
{
	struct memc_flexspi_data *data = dev->data;

	FLEXSPI_SoftwareReset(data->base);

	return 0;
}

int memc_flexspi_transfer(const struct device *dev,
			  flexspi_transfer_t *transfer)
{
	struct memc_flexspi_data *data = dev->data;
	status_t status;

	/* Adjust transfer LUT index based on port */
	transfer->seqIndex += data->port_luts[transfer->port].lut_offset;
	status = FLEXSPI_TransferBlocking(data->base, transfer);

	if (status != kStatus_Success) {
		LOG_ERR("Transfer error: %d", status);
		return -EIO;
	}

	return 0;
}
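
/*
 * Usage sketch (a hedged illustration, not part of this driver): a caller
 * issues an IP command by filling a flexspi_transfer_t with a sequence index
 * relative to its own LUT array; the per-port LUT offset is added above before
 * the transfer is started. "READ_SEQ_IDX", "controller", "buf", "offset" and
 * "len" below are placeholders.
 *
 *	flexspi_transfer_t xfer = {
 *		.deviceAddress = offset,
 *		.port = kFLEXSPI_PortA1,
 *		.cmdType = kFLEXSPI_Read,
 *		.SeqNumber = 1,
 *		.seqIndex = READ_SEQ_IDX,
 *		.data = (uint32_t *)buf,
 *		.dataSize = len,
 *	};
 *
 *	ret = memc_flexspi_transfer(controller, &xfer);
 */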

void *memc_flexspi_get_ahb_address(const struct device *dev,
				   flexspi_port_t port, off_t offset)
{
	struct memc_flexspi_data *data = dev->data;
	int i;

	if (port >= kFLEXSPI_PortCount) {
		LOG_ERR("Invalid port number: %u", port);
		return NULL;
	}

	for (i = 0; i < port; i++) {
		offset += data->size[i];
	}

	return data->ahb_base + offset;
}
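
/*
 * Usage sketch (a hedged illustration, not part of this driver): data can
 * also be read through the memory-mapped AHB window returned above, e.g. a
 * flash driver's read path might simply be:
 *
 *	uint8_t *src = memc_flexspi_get_ahb_address(controller, port, offset);
 *
 *	memcpy(dest, src, len);
 *
 * where "controller", "dest", "offset" and "len" are placeholders.
 */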

static int memc_flexspi_init(const struct device *dev)
{
	struct memc_flexspi_data *data = dev->data;
	flexspi_config_t flexspi_config;
	int ret;

	/* Do not reconfigure the FlexSPI instance we are executing (XIP) from */
	if (memc_flexspi_is_running_xip(dev)) {
		LOG_DBG("XIP active on %s, skipping init", dev->name);
		return 0;
	}

	/*
	 * SoCs such as the RT1064 and RT1024 have internal flash and no pinmux
	 * settings; continue if no pinctrl state is found.
	 */
	ret = pinctrl_apply_state(data->pincfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0 && ret != -ENOENT) {
		return ret;
	}

	FLEXSPI_GetDefaultConfig(&flexspi_config);

	flexspi_config.ahbConfig.enableAHBBufferable = data->ahb_bufferable;
	flexspi_config.ahbConfig.enableAHBCachable = data->ahb_cacheable;
	flexspi_config.ahbConfig.enableAHBPrefetch = data->ahb_prefetch;
	flexspi_config.ahbConfig.enableReadAddressOpt = data->ahb_read_addr_opt;
#if !(defined(FSL_FEATURE_FLEXSPI_HAS_NO_MCR0_COMBINATIONEN) && \
	FSL_FEATURE_FLEXSPI_HAS_NO_MCR0_COMBINATIONEN)
	flexspi_config.enableCombination = data->combination_mode;
#endif

#if !(defined(FSL_FEATURE_FLEXSPI_HAS_NO_MCR2_SCKBDIFFOPT) && \
	FSL_FEATURE_FLEXSPI_HAS_NO_MCR2_SCKBDIFFOPT)
	flexspi_config.enableSckBDiffOpt = data->sck_differential_clock;
#endif
	flexspi_config.rxSampleClock = data->rx_sample_clock;

	/* Configure AHB RX buffers, if any configuration settings are present */
	__ASSERT(data->buf_cfg_cnt < FSL_FEATURE_FLEXSPI_AHB_BUFFER_COUNT,
		 "Maximum RX buffer configuration count exceeded");
	for (uint8_t i = 0; i < data->buf_cfg_cnt; i++) {
		/* Whether the AHB should prefetch up to the buffer size */
		flexspi_config.ahbConfig.buffer[i].enablePrefetch = data->buf_cfg[i].prefetch;
		/* AHB access priority (used for suspending control of AHB prefetching) */
		flexspi_config.ahbConfig.buffer[i].priority = data->buf_cfg[i].priority;
		/* AHB master index, SOC specific */
		flexspi_config.ahbConfig.buffer[i].masterIndex = data->buf_cfg[i].master_id;
		/* RX buffer allocation (total available buffer space is instance/SOC specific) */
		flexspi_config.ahbConfig.buffer[i].bufferSize = data->buf_cfg[i].buf_size;
	}

	FLEXSPI_Init(data->base, &flexspi_config);

	return 0;
}

#ifdef CONFIG_PM_DEVICE
static int memc_flexspi_pm_action(const struct device *dev, enum pm_device_action action)
{
	struct memc_flexspi_data *data = dev->data;
	int ret;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		ret = pinctrl_apply_state(data->pincfg, PINCTRL_STATE_DEFAULT);
		if (ret < 0 && ret != -ENOENT) {
			return ret;
		}
		break;
	case PM_DEVICE_ACTION_SUSPEND:
		ret = pinctrl_apply_state(data->pincfg, PINCTRL_STATE_SLEEP);
		if (ret < 0 && ret != -ENOENT) {
			return ret;
		}
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}
#endif

#if defined(CONFIG_XIP) && defined(CONFIG_FLASH_MCUX_FLEXSPI_XIP)
/* Checks if image flash base address is in the FlexSPI AHB base region */
#define MEMC_FLEXSPI_CFG_XIP(node_id)						\
	((CONFIG_FLASH_BASE_ADDRESS) >= DT_REG_ADDR_BY_IDX(node_id, 1)) &&	\
	((CONFIG_FLASH_BASE_ADDRESS) < (DT_REG_ADDR_BY_IDX(node_id, 1) +	\
					DT_REG_SIZE_BY_IDX(node_id, 1)))

#else
#define MEMC_FLEXSPI_CFG_XIP(node_id) false
#endif

#define MEMC_FLEXSPI(n)							\
	PINCTRL_DT_INST_DEFINE(n);					\
	static uint16_t buf_cfg_##n[] =					\
		DT_INST_PROP_OR(n, rx_buffer_config, {0});		\
									\
	static struct memc_flexspi_data memc_flexspi_data_##n = {	\
		.base = (FLEXSPI_Type *) DT_INST_REG_ADDR(n),		\
		.xip = MEMC_FLEXSPI_CFG_XIP(DT_DRV_INST(n)),		\
		.ahb_base = (uint8_t *) DT_INST_REG_ADDR_BY_IDX(n, 1),	\
		.ahb_bufferable = DT_INST_PROP(n, ahb_bufferable),	\
		.ahb_cacheable = DT_INST_PROP(n, ahb_cacheable),	\
		.ahb_prefetch = DT_INST_PROP(n, ahb_prefetch),		\
		.ahb_read_addr_opt = DT_INST_PROP(n, ahb_read_addr_opt),\
		.combination_mode = DT_INST_PROP(n, combination_mode),	\
		.sck_differential_clock = DT_INST_PROP(n, sck_differential_clock), \
		.rx_sample_clock = DT_INST_PROP(n, rx_clock_source),	\
		.buf_cfg = (struct memc_flexspi_buf_cfg *)buf_cfg_##n,	\
		.buf_cfg_cnt = sizeof(buf_cfg_##n) /			\
			sizeof(struct memc_flexspi_buf_cfg),		\
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),		\
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),	\
		.clock_subsys = (clock_control_subsys_t)		\
			DT_INST_CLOCKS_CELL(n, name),			\
	};								\
									\
	PM_DEVICE_DT_INST_DEFINE(n, memc_flexspi_pm_action);		\
									\
	DEVICE_DT_INST_DEFINE(n,					\
			      memc_flexspi_init,			\
			      PM_DEVICE_DT_INST_GET(n),			\
			      &memc_flexspi_data_##n,			\
			      NULL,					\
			      POST_KERNEL,				\
			      CONFIG_MEMC_INIT_PRIORITY,		\
			      NULL);

DT_INST_FOREACH_STATUS_OKAY(MEMC_FLEXSPI)