1 /*
2 * Copyright 2020-2023 NXP
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT nxp_imx_flexspi
8
9 #include <zephyr/logging/log.h>
10 #include <zephyr/sys/util.h>
11 #include <zephyr/drivers/pinctrl.h>
12 #include <zephyr/drivers/clock_control.h>
13 #include <zephyr/pm/device.h>
14 #include <soc.h>
15
16 #include "memc_mcux_flexspi.h"
17
18
/*
 * NOTE: If CONFIG_FLASH_MCUX_FLEXSPI_XIP is selected, any external functions
 * called while interacting with the FlexSPI MUST be relocated to SRAM or ITCM
 * at runtime, so that the chip does not access the FlexSPI to read program
 * instructions while it is being written to.
 */
#if defined(CONFIG_FLASH_MCUX_FLEXSPI_XIP) && (CONFIG_MEMC_LOG_LEVEL > 0)
#warning "Enabling memc driver logging and XIP mode simultaneously can cause \
read-while-write hazards. This configuration is not recommended."
#endif
29
30 #define FLEXSPI_MAX_LUT 64U
31
32 LOG_MODULE_REGISTER(memc_flexspi, CONFIG_MEMC_LOG_LEVEL);
33
/* One AHB RX buffer configuration entry. Instances are created by
 * reinterpreting the devicetree "rx-buffer-config" uint16 array (see the
 * MEMC_FLEXSPI instantiation macro below), so the field order and widths
 * must match the devicetree cell layout exactly — hence __packed.
 */
struct memc_flexspi_buf_cfg {
	uint16_t prefetch;  /* enable AHB prefetch for this buffer */
	uint16_t priority;  /* AHB access priority */
	uint16_t master_id; /* AHB master index, SOC specific */
	uint16_t buf_size;  /* RX buffer size allocation */
} __packed;
40
/* Structure tracking LUT offset and usage per each port */
struct port_lut {
	uint8_t lut_offset; /* first entry in the shared LUT table used by this port */
	uint8_t lut_used;   /* number of LUT entries reserved (rounded up to a sequence) */
};
46
/* flexspi device data should be stored in RAM to avoid read-while-write hazards */
struct memc_flexspi_data {
	FLEXSPI_Type *base;      /* FlexSPI peripheral register base */
	uint8_t *ahb_base;       /* base of the AHB memory-mapped flash region */
	bool xip;                /* true when code executes from flash on this instance */
	bool ahb_bufferable;
	bool ahb_cacheable;
	bool ahb_prefetch;
	bool ahb_read_addr_opt;
	bool combination_mode;
	bool sck_differential_clock;
	flexspi_read_sample_clock_t rx_sample_clock;
#if defined(FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB) && \
	FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB
	/* Separate RX sample clock source for port B, when the SOC supports it */
	flexspi_read_sample_clock_t rx_sample_clock_b;
#endif
	const struct pinctrl_dev_config *pincfg;
	size_t size[kFLEXSPI_PortCount];                /* flash size per port, bytes */
	struct port_lut port_luts[kFLEXSPI_PortCount];  /* per-port LUT bookkeeping */
	struct memc_flexspi_buf_cfg *buf_cfg;           /* AHB RX buffer configs from DT */
	uint8_t buf_cfg_cnt;                            /* number of entries in buf_cfg */
	const struct device *clock_dev;                 /* clock controller for this instance */
	clock_control_subsys_t clock_subsys;
};
71
memc_flexspi_wait_bus_idle(const struct device * dev)72 void memc_flexspi_wait_bus_idle(const struct device *dev)
73 {
74 struct memc_flexspi_data *data = dev->data;
75
76 while (false == FLEXSPI_GetBusIdleStatus(data->base)) {
77 }
78 }
79
memc_flexspi_is_running_xip(const struct device * dev)80 bool memc_flexspi_is_running_xip(const struct device *dev)
81 {
82 struct memc_flexspi_data *data = dev->data;
83
84 return data->xip;
85 }
86
/**
 * Reclock the FlexSPI serial clock for a flash device.
 *
 * The whole sequence runs with interrupts locked and the bus idle, since
 * no flash (XIP) access may occur while the clock and DLL are changing.
 *
 * @param dev FlexSPI device
 * @param device_config flash device configuration; flexspiRootClk is
 *        updated with the rate actually achieved
 * @param port FlexSPI port the flash device is attached to
 * @param freq_hz requested serial clock frequency, in Hz
 * @return 0 on success, negative errno from the clock driver on failure
 */
int memc_flexspi_update_clock(const struct device *dev,
			      flexspi_device_config_t *device_config,
			      flexspi_port_t port, uint32_t freq_hz)
{
	struct memc_flexspi_data *data = dev->data;
	uint32_t rate;
	uint32_t key;
	int ret;

	/* To reclock the FlexSPI, we should:
	 * - disable the module
	 * - set the new clock
	 * - reenable the module
	 * - reset the module
	 * We CANNOT XIP at any point during this process
	 */
	key = irq_lock();
	memc_flexspi_wait_bus_idle(dev);

	ret = clock_control_set_rate(data->clock_dev, data->clock_subsys,
				(clock_control_subsys_rate_t)freq_hz);
	if (ret < 0) {
		irq_unlock(key);
		return ret;
	}

	/*
	 * We need to update the DLL value before we call clock_control_get_rate,
	 * because this will cause XIP (flash reads) to occur. Although the
	 * true flash clock is not known, assume the set_rate function programmed
	 * a value close to what we requested.
	 */
	device_config->flexspiRootClk = freq_hz;
	FLEXSPI_UpdateDllValue(data->base, device_config, port);
	memc_flexspi_reset(dev);

	memc_flexspi_wait_bus_idle(dev);
	ret = clock_control_get_rate(data->clock_dev, data->clock_subsys, &rate);
	if (ret < 0) {
		irq_unlock(key);
		return ret;
	}

	/* Reprogram the DLL with the true clock rate now that it is known */
	device_config->flexspiRootClk = rate;
	FLEXSPI_UpdateDllValue(data->base, device_config, port);

	memc_flexspi_reset(dev);

	irq_unlock(key);

	return 0;
}
140
/**
 * Configure a flash device attached to a FlexSPI port.
 *
 * Installs the device configuration and its LUT (command lookup table)
 * into the controller, packing each port's LUT entries into the shared
 * 64-entry table and shifting the device's read/write sequence indices
 * to match where its LUT actually landed.
 *
 * @param dev FlexSPI device
 * @param device_config flash device configuration (flashSize is in KB)
 * @param lut_array LUT entries for this device
 * @param lut_count number of entries in @p lut_array
 * @param port FlexSPI port the device is attached to
 * @return 0 on success, -EINVAL on invalid port, -ENOBUFS if the shared
 *         LUT table cannot hold the new entries
 */
int memc_flexspi_set_device_config(const struct device *dev,
				   const flexspi_device_config_t *device_config,
				   const uint32_t *lut_array,
				   uint8_t lut_count,
				   flexspi_port_t port)
{
	flexspi_device_config_t tmp_config;
	uint32_t tmp_lut[FLEXSPI_MAX_LUT];
	struct memc_flexspi_data *data = dev->data;
	const uint32_t *lut_ptr = lut_array;
	uint8_t lut_used = 0U;
	unsigned int key = 0;

	if (port >= kFLEXSPI_PortCount) {
		LOG_ERR("Invalid port number");
		return -EINVAL;
	}

	if (data->port_luts[port].lut_used < lut_count) {
		/* We cannot reuse the existing LUT slot,
		 * Check if the LUT table will fit into the remaining LUT slots
		 */
		for (uint8_t i = 0; i < kFLEXSPI_PortCount; i++) {
			lut_used += data->port_luts[i].lut_used;
		}

		if ((lut_used + lut_count) > FLEXSPI_MAX_LUT) {
			return -ENOBUFS;
		}
	}

	/* flashSize is expressed in KB */
	data->size[port] = device_config->flashSize * KB(1);

	if (memc_flexspi_is_running_xip(dev)) {
		/* We need to avoid flash access while configuring the FlexSPI.
		 * To do this, we will copy the LUT array into stack-allocated
		 * temporary memory
		 */
		memcpy(tmp_lut, lut_array, lut_count * MEMC_FLEXSPI_CMD_SIZE);
		lut_ptr = tmp_lut;
	}

	memcpy(&tmp_config, device_config, sizeof(tmp_config));
	/* Update FlexSPI AWRSEQID and ARDSEQID values based on where the LUT
	 * array will actually be loaded.
	 */
	if (data->port_luts[port].lut_used < lut_count) {
		/* Update lut offset with new value */
		data->port_luts[port].lut_offset = lut_used;
	}
	/* LUTs should only be installed on sequence boundaries, every
	 * 4 entries. Round LUT usage up to nearest sequence
	 */
	data->port_luts[port].lut_used = ROUND_UP(lut_count, 4);
	tmp_config.ARDSeqIndex += data->port_luts[port].lut_offset / MEMC_FLEXSPI_CMD_PER_SEQ;
	tmp_config.AWRSeqIndex += data->port_luts[port].lut_offset / MEMC_FLEXSPI_CMD_PER_SEQ;

	/* Lock IRQs before reconfiguring FlexSPI, to prevent XIP */
	key = irq_lock();

	FLEXSPI_SetFlashConfig(data->base, &tmp_config, port);
	FLEXSPI_UpdateLUT(data->base, data->port_luts[port].lut_offset,
			  lut_ptr, lut_count);
	irq_unlock(key);

	return 0;
}
208
memc_flexspi_reset(const struct device * dev)209 int memc_flexspi_reset(const struct device *dev)
210 {
211 struct memc_flexspi_data *data = dev->data;
212
213 FLEXSPI_SoftwareReset(data->base);
214
215 return 0;
216 }
217
/**
 * Execute a blocking IP-command transfer on the FlexSPI bus.
 *
 * The caller's sequence index and device address are relative to its own
 * port; this function shifts them by the port's LUT placement and by the
 * mapped sizes of all lower-numbered ports before starting the transfer.
 *
 * @param dev FlexSPI device
 * @param transfer transfer descriptor (not modified)
 * @return 0 on success, -EIO on a transfer error
 */
int memc_flexspi_transfer(const struct device *dev,
		flexspi_transfer_t *transfer)
{
	struct memc_flexspi_data *data = dev->data;
	flexspi_transfer_t adjusted;
	flexspi_transfer_t *xfer = transfer;
	uint32_t base_addr = 0U;
	uint32_t seq_shift;
	status_t status;

	/* Sequence shift for this port's position in the shared LUT */
	seq_shift = data->port_luts[transfer->port].lut_offset /
		    MEMC_FLEXSPI_CMD_PER_SEQ;
	/* Devices on lower-numbered ports occupy the lower address range */
	for (int i = 0; i < transfer->port; i++) {
		base_addr += data->size[i];
	}

	if ((seq_shift != 0U) || (base_addr != 0U)) {
		/* Work on an adjusted copy; leave the caller's struct intact */
		adjusted = *transfer;
		adjusted.seqIndex += seq_shift;
		adjusted.deviceAddress += base_addr;
		xfer = &adjusted;
	}

	status = FLEXSPI_TransferBlocking(data->base, xfer);
	if (status != kStatus_Success) {
		LOG_ERR("Transfer error: %d", status);
		return -EIO;
	}

	return 0;
}
252
/**
 * Translate a (port, offset) pair into an AHB memory-mapped address.
 *
 * @param dev FlexSPI device
 * @param port FlexSPI port the flash device is attached to
 * @param offset offset within that device's flash
 * @return AHB address for the offset, or NULL on an invalid port
 */
void *memc_flexspi_get_ahb_address(const struct device *dev,
				   flexspi_port_t port, off_t offset)
{
	struct memc_flexspi_data *data = dev->data;

	if (port >= kFLEXSPI_PortCount) {
		LOG_ERR("Invalid port number: %u", port);
		return NULL;
	}

	/* Skip past the mapped regions of all lower-numbered ports */
	for (int i = 0; i < port; i++) {
		offset += data->size[i];
	}

#if defined(FSL_FEATURE_FLEXSPI_SUPPORT_ADDRESS_SHIFT) && \
	(FSL_FEATURE_FLEXSPI_SUPPORT_ADDRESS_SHIFT)
	if (data->base->FLSHCR0[port] & FLEXSPI_FLSHCR0_ADDRSHIFT_MASK) {
		/* Address shift is set, add 0x1000_0000 to AHB address */
		offset += 0x10000000;
	}
#endif

	return data->ahb_base + offset;
}
278
memc_flexspi_init(const struct device * dev)279 static int memc_flexspi_init(const struct device *dev)
280 {
281 struct memc_flexspi_data *data = dev->data;
282 flexspi_config_t flexspi_config;
283 uint32_t flash_sizes[kFLEXSPI_PortCount];
284 int ret;
285 uint8_t i;
286
287 /* we should not configure the device we are running on */
288 if (memc_flexspi_is_running_xip(dev)) {
289 if (!IS_ENABLED(CONFIG_MEMC_MCUX_FLEXSPI_INIT_XIP)) {
290 LOG_DBG("XIP active on %s, skipping init", dev->name);
291 return 0;
292 }
293 }
294 /*
295 * SOCs such as the RT1064 and RT1024 have internal flash, and no pinmux
296 * settings, continue if no pinctrl state found.
297 */
298 ret = pinctrl_apply_state(data->pincfg, PINCTRL_STATE_DEFAULT);
299 if (ret < 0 && ret != -ENOENT) {
300 return ret;
301 }
302
303 FLEXSPI_GetDefaultConfig(&flexspi_config);
304
305 flexspi_config.ahbConfig.enableAHBBufferable = data->ahb_bufferable;
306 flexspi_config.ahbConfig.enableAHBCachable = data->ahb_cacheable;
307 flexspi_config.ahbConfig.enableAHBPrefetch = data->ahb_prefetch;
308 flexspi_config.ahbConfig.enableReadAddressOpt = data->ahb_read_addr_opt;
309 #if !(defined(FSL_FEATURE_FLEXSPI_HAS_NO_MCR0_COMBINATIONEN) && \
310 FSL_FEATURE_FLEXSPI_HAS_NO_MCR0_COMBINATIONEN)
311 flexspi_config.enableCombination = data->combination_mode;
312 #endif
313
314 #if !(defined(FSL_FEATURE_FLEXSPI_HAS_NO_MCR2_SCKBDIFFOPT) && \
315 FSL_FEATURE_FLEXSPI_HAS_NO_MCR2_SCKBDIFFOPT)
316 flexspi_config.enableSckBDiffOpt = data->sck_differential_clock;
317 #endif
318 flexspi_config.rxSampleClock = data->rx_sample_clock;
319 #if defined(FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB) && \
320 FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB
321 flexspi_config.rxSampleClockPortB = data->rx_sample_clock_b;
322 #if defined(FSL_FEATURE_FLEXSPI_SUPPORT_RXCLKSRC_DIFF) && \
323 FSL_FEATURE_FLEXSPI_SUPPORT_RXCLKSRC_DIFF
324 if (flexspi_config.rxSampleClock != flexspi_config.rxSampleClockPortB) {
325 flexspi_config.rxSampleClockDiff = true;
326 }
327 #endif
328 #endif
329
330 /* Configure AHB RX buffers, if any configuration settings are present */
331 __ASSERT(data->buf_cfg_cnt < FSL_FEATURE_FLEXSPI_AHB_BUFFER_COUNT,
332 "Maximum RX buffer configuration count exceeded");
333 for (i = 0; i < data->buf_cfg_cnt; i++) {
334 /* Should AHB prefetch up to buffer size? */
335 flexspi_config.ahbConfig.buffer[i].enablePrefetch = data->buf_cfg[i].prefetch;
336 /* AHB access priority (used for suspending control of AHB prefetching )*/
337 flexspi_config.ahbConfig.buffer[i].priority = data->buf_cfg[i].priority;
338 /* AHB master index, SOC specific */
339 flexspi_config.ahbConfig.buffer[i].masterIndex = data->buf_cfg[i].master_id;
340 /* RX buffer allocation (total available buffer space is instance/SOC specific) */
341 flexspi_config.ahbConfig.buffer[i].bufferSize = data->buf_cfg[i].buf_size;
342 }
343
344 if (memc_flexspi_is_running_xip(dev)) {
345 /* Save flash sizes- FlexSPI init will reset them */
346 for (i = 0; i < kFLEXSPI_PortCount; i++) {
347 flash_sizes[i] = data->base->FLSHCR0[i];
348 }
349 }
350
351 FLEXSPI_Init(data->base, &flexspi_config);
352
353 if (memc_flexspi_is_running_xip(dev)) {
354 /* Restore flash sizes */
355 for (i = 0; i < kFLEXSPI_PortCount; i++) {
356 data->base->FLSHCR0[i] = flash_sizes[i];
357 }
358
359 /* Reenable FLEXSPI module */
360 data->base->MCR0 &= ~FLEXSPI_MCR0_MDIS_MASK;
361 }
362
363 return 0;
364 }
365
366 #ifdef CONFIG_PM_DEVICE
memc_flexspi_pm_action(const struct device * dev,enum pm_device_action action)367 static int memc_flexspi_pm_action(const struct device *dev, enum pm_device_action action)
368 {
369 struct memc_flexspi_data *data = dev->data;
370 int ret;
371
372 switch (action) {
373 case PM_DEVICE_ACTION_RESUME:
374 ret = pinctrl_apply_state(data->pincfg, PINCTRL_STATE_DEFAULT);
375 if (ret < 0 && ret != -ENOENT) {
376 return ret;
377 }
378 break;
379 case PM_DEVICE_ACTION_SUSPEND:
380 ret = pinctrl_apply_state(data->pincfg, PINCTRL_STATE_SLEEP);
381 if (ret < 0 && ret != -ENOENT) {
382 return ret;
383 }
384 break;
385 default:
386 return -ENOTSUP;
387 }
388
389 return 0;
390 }
391 #endif
392
/* Initializer for the port B RX sample clock member; expands to nothing on
 * SOCs without a separate port B RX clock source (the member is absent).
 */
#if defined(FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB) && \
	FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB
#define MEMC_FLEXSPI_RXCLK_B(inst) .rx_sample_clock_b = DT_INST_PROP(inst, rx_clock_source_b),
#else
#define MEMC_FLEXSPI_RXCLK_B(inst)
#endif
399
#if defined(CONFIG_XIP) && defined(CONFIG_FLASH_MCUX_FLEXSPI_XIP)
/* Checks if image flash base address is in the FlexSPI AHB base region
 * (reg index 1 of the node is the AHB memory-mapped region); evaluates
 * to a compile-time boolean used to set the .xip flag.
 */
#define MEMC_FLEXSPI_CFG_XIP(node_id)						\
	((CONFIG_FLASH_BASE_ADDRESS) >= DT_REG_ADDR_BY_IDX(node_id, 1)) &&	\
	((CONFIG_FLASH_BASE_ADDRESS) < (DT_REG_ADDR_BY_IDX(node_id, 1) +	\
					DT_REG_SIZE_BY_IDX(node_id, 1)))

#else
#define MEMC_FLEXSPI_CFG_XIP(node_id) false
#endif
410
/* Per-instance instantiation: pinctrl, driver data and device definition.
 * The devicetree "rx-buffer-config" property is read as a flat uint16
 * array and reinterpreted as struct memc_flexspi_buf_cfg entries
 * (4 uint16 cells per entry), matching that struct's __packed layout.
 */
#define MEMC_FLEXSPI(n)							\
	PINCTRL_DT_INST_DEFINE(n);					\
	static uint16_t buf_cfg_##n[] =					\
		DT_INST_PROP_OR(n, rx_buffer_config, {0});		\
									\
	static struct memc_flexspi_data					\
		memc_flexspi_data_##n = {				\
		.base = (FLEXSPI_Type *) DT_INST_REG_ADDR(n),		\
		.xip = MEMC_FLEXSPI_CFG_XIP(DT_DRV_INST(n)),		\
		.ahb_base = (uint8_t *) DT_INST_REG_ADDR_BY_IDX(n, 1),	\
		.ahb_bufferable = DT_INST_PROP(n, ahb_bufferable),	\
		.ahb_cacheable = DT_INST_PROP(n, ahb_cacheable),	\
		.ahb_prefetch = DT_INST_PROP(n, ahb_prefetch),		\
		.ahb_read_addr_opt = DT_INST_PROP(n, ahb_read_addr_opt),\
		.combination_mode = DT_INST_PROP(n, combination_mode),	\
		.sck_differential_clock = DT_INST_PROP(n, sck_differential_clock), \
		.rx_sample_clock = DT_INST_PROP(n, rx_clock_source),	\
		MEMC_FLEXSPI_RXCLK_B(n)					\
		.buf_cfg = (struct memc_flexspi_buf_cfg *)buf_cfg_##n,	\
		.buf_cfg_cnt = sizeof(buf_cfg_##n) /			\
			sizeof(struct memc_flexspi_buf_cfg),		\
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),		\
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),	\
		.clock_subsys = (clock_control_subsys_t)		\
			DT_INST_CLOCKS_CELL(n, name),			\
	};								\
									\
	PM_DEVICE_DT_INST_DEFINE(n, memc_flexspi_pm_action);		\
									\
	DEVICE_DT_INST_DEFINE(n,					\
			      memc_flexspi_init,			\
			      PM_DEVICE_DT_INST_GET(n),			\
			      &memc_flexspi_data_##n,			\
			      NULL,					\
			      POST_KERNEL,				\
			      CONFIG_MEMC_MCUX_FLEXSPI_INIT_PRIORITY,	\
			      NULL);

DT_INST_FOREACH_STATUS_OKAY(MEMC_FLEXSPI)
450