1 /*
2  * Copyright 2020-2023 NXP
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DT_DRV_COMPAT	nxp_imx_flexspi
8 
9 #include <zephyr/logging/log.h>
10 #include <zephyr/sys/util.h>
11 #include <zephyr/drivers/pinctrl.h>
12 #include <zephyr/drivers/clock_control.h>
13 #include <zephyr/pm/device.h>
14 #include <soc.h>
15 
16 #include "memc_mcux_flexspi.h"
17 
18 
/*
 * NOTE: If CONFIG_FLASH_MCUX_FLEXSPI_XIP is selected, any external functions
 * called while interacting with the FlexSPI MUST be relocated to SRAM or ITCM
 * at runtime, so that the chip does not access the FlexSPI to read program
 * instructions while it is being written to.
 */
25 #if defined(CONFIG_FLASH_MCUX_FLEXSPI_XIP) && (CONFIG_MEMC_LOG_LEVEL > 0)
26 #warning "Enabling memc driver logging and XIP mode simultaneously can cause \
27 	read-while-write hazards. This configuration is not recommended."
28 #endif
29 
/* Total number of LUT entries (command words) in the FlexSPI LUT table,
 * shared between all ports of one FlexSPI instance.
 */
#define FLEXSPI_MAX_LUT 64U

LOG_MODULE_REGISTER(memc_flexspi, CONFIG_MEMC_LOG_LEVEL);
33 
/* One AHB RX buffer configuration entry.
 * Must stay packed and field-ordered to match the flat uint16 array of the
 * devicetree "rx-buffer-config" property, which is cast directly to this
 * type in the instantiation macro below.
 */
struct memc_flexspi_buf_cfg {
	uint16_t prefetch;	/* enable AHB prefetch for this buffer */
	uint16_t priority;	/* AHB access priority */
	uint16_t master_id;	/* AHB master index (SoC specific) */
	uint16_t buf_size;	/* RX buffer size allocation */
} __packed;
40 
/* Structure tracking LUT offset and usage per each port */
struct port_lut {
	uint8_t lut_offset;	/* first LUT entry allocated to this port */
	uint8_t lut_used;	/* number of LUT entries used (rounded up to a sequence) */
};
46 
/* flexspi device data should be stored in RAM to avoid read-while-write hazards */
struct memc_flexspi_data {
	FLEXSPI_Type *base;		/* FlexSPI register block */
	uint8_t *ahb_base;		/* base of the memory-mapped AHB window */
	bool xip;			/* true if code executes (XIP) from this FlexSPI */
	bool ahb_bufferable;
	bool ahb_cacheable;
	bool ahb_prefetch;
	bool ahb_read_addr_opt;
	uint8_t ahb_boundary;		/* AHB alignment boundary (DT enum index) */
	bool combination_mode;
	bool sck_differential_clock;
	flexspi_read_sample_clock_t rx_sample_clock;
#if defined(FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB) && \
FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB
	flexspi_read_sample_clock_t rx_sample_clock_b;	/* separate RX clock for port B */
#endif
	const struct pinctrl_dev_config *pincfg;
	size_t size[kFLEXSPI_PortCount];	/* flash size per port, in bytes */
	struct port_lut port_luts[kFLEXSPI_PortCount];	/* LUT allocation per port */
	struct memc_flexspi_buf_cfg *buf_cfg;	/* AHB RX buffer settings (from DT) */
	uint8_t buf_cfg_cnt;			/* number of entries in buf_cfg */
	const struct device *clock_dev;		/* clock controller feeding this FlexSPI */
	clock_control_subsys_t clock_subsys;
};
72 
memc_flexspi_wait_bus_idle(const struct device * dev)73 void memc_flexspi_wait_bus_idle(const struct device *dev)
74 {
75 	struct memc_flexspi_data *data = dev->data;
76 
77 	while (false == FLEXSPI_GetBusIdleStatus(data->base)) {
78 	}
79 }
80 
memc_flexspi_is_running_xip(const struct device * dev)81 bool memc_flexspi_is_running_xip(const struct device *dev)
82 {
83 	struct memc_flexspi_data *data = dev->data;
84 
85 	return data->xip;
86 }
87 
/*
 * Update the FlexSPI bus clock for the given port.
 *
 * To reclock the FlexSPI, we should:
 * - disable the module
 * - set the new clock
 * - reenable the module
 * - reset the module
 * We CANNOT XIP at any point during this process, so interrupts stay
 * locked for the full sequence.
 *
 * @param dev FlexSPI device
 * @param device_config flash device configuration; flexspiRootClk is
 *	  updated to the actual programmed rate on success
 * @param port FlexSPI port the flash device is attached to
 * @param freq_hz requested bus clock frequency in Hz
 * @return 0 on success, negative errno from the clock driver on failure
 */
int memc_flexspi_update_clock(const struct device *dev,
		flexspi_device_config_t *device_config,
		flexspi_port_t port, uint32_t freq_hz)
{
	struct memc_flexspi_data *data = dev->data;
	uint32_t rate;
	/* irq_lock() returns unsigned int; keep the key in that type for
	 * consistency with the rest of this driver.
	 */
	unsigned int key;
	int ret;

	key = irq_lock();
	memc_flexspi_wait_bus_idle(dev);

	ret = clock_control_set_rate(data->clock_dev, data->clock_subsys,
				(clock_control_subsys_rate_t)freq_hz);
	if (ret < 0) {
		irq_unlock(key);
		return ret;
	}

	/*
	 * We need to update the DLL value before we call clock_control_get_rate,
	 * because this will cause XIP (flash reads) to occur. Although the
	 * true flash clock is not known, assume the set_rate function programmed
	 * a value close to what we requested.
	 */
	device_config->flexspiRootClk = freq_hz;
	FLEXSPI_UpdateDllValue(data->base, device_config, port);
	memc_flexspi_reset(dev);

	memc_flexspi_wait_bus_idle(dev);
	ret = clock_control_get_rate(data->clock_dev, data->clock_subsys, &rate);
	if (ret < 0) {
		irq_unlock(key);
		return ret;
	}

	/* Reprogram the DLL with the actual rate reported by the clock
	 * driver, then reset the module once more.
	 */
	device_config->flexspiRootClk = rate;
	FLEXSPI_UpdateDllValue(data->base, device_config, port);

	memc_flexspi_reset(dev);

	irq_unlock(key);

	return 0;
}
141 
/*
 * Register a flash device on a FlexSPI port: program its flash
 * configuration and install its LUT (command sequence) table.
 *
 * LUT entries from all ports share one 64-entry table, so this function
 * allocates a per-port offset and rewrites the read/write sequence
 * indices in the device config to account for it.
 *
 * @param dev FlexSPI device
 * @param device_config flash device configuration (copied; not modified)
 * @param lut_array LUT command array for this device
 * @param lut_count number of entries in lut_array
 * @param port FlexSPI port the flash device is attached to
 * @return 0 on success, -EINVAL on bad port, -ENOBUFS if the shared LUT
 *	   table cannot hold the new entries
 */
int memc_flexspi_set_device_config(const struct device *dev,
		const flexspi_device_config_t *device_config,
		const uint32_t *lut_array,
		uint8_t lut_count,
		flexspi_port_t port)
{
	flexspi_device_config_t tmp_config;
	uint32_t tmp_lut[FLEXSPI_MAX_LUT];
	struct memc_flexspi_data *data = dev->data;
	const uint32_t *lut_ptr = lut_array;
	uint8_t lut_used = 0U;
	unsigned int key = 0;

	if (port >= kFLEXSPI_PortCount) {
		LOG_ERR("Invalid port number");
		return -EINVAL;
	}

	if (data->port_luts[port].lut_used < lut_count) {
		/* We cannot reuse the existing LUT slot,
		 * Check if the LUT table will fit into the remaining LUT slots
		 */
		for (uint8_t i = 0; i < kFLEXSPI_PortCount; i++) {
			lut_used += data->port_luts[i].lut_used;
		}

		if ((lut_used + lut_count) > FLEXSPI_MAX_LUT) {
			return -ENOBUFS;
		}
	}

	/* flashSize is expressed in KB by the MCUX SDK */
	data->size[port] = device_config->flashSize * KB(1);

	if (memc_flexspi_is_running_xip(dev)) {
		/* We need to avoid flash access while configuring the FlexSPI.
		 * To do this, we will copy the LUT array into stack-allocated
		 * temporary memory
		 */
		memcpy(tmp_lut, lut_array, lut_count * MEMC_FLEXSPI_CMD_SIZE);
		lut_ptr = tmp_lut;
	}

	memcpy(&tmp_config, device_config, sizeof(tmp_config));
	/* Update FlexSPI AWRSEQID and ARDSEQID values based on where the LUT
	 * array will actually be loaded.
	 */
	if (data->port_luts[port].lut_used < lut_count) {
		/* Update lut offset with new value: the new entries are
		 * appended after all currently used entries (lut_used was
		 * summed in the allocation check above).
		 */
		data->port_luts[port].lut_offset = lut_used;
	}
	/* LUTs should only be installed on sequence boundaries, every
	 * 4 entries. Round LUT usage up to nearest sequence
	 */
	data->port_luts[port].lut_used = ROUND_UP(lut_count, 4);
	tmp_config.ARDSeqIndex += data->port_luts[port].lut_offset / MEMC_FLEXSPI_CMD_PER_SEQ;
	tmp_config.AWRSeqIndex += data->port_luts[port].lut_offset / MEMC_FLEXSPI_CMD_PER_SEQ;

	/* Lock IRQs before reconfiguring FlexSPI, to prevent XIP */
	key = irq_lock();

	FLEXSPI_SetFlashConfig(data->base, &tmp_config, port);
	FLEXSPI_UpdateLUT(data->base, data->port_luts[port].lut_offset,
			  lut_ptr, lut_count);
	irq_unlock(key);

	return 0;
}
209 
memc_flexspi_reset(const struct device * dev)210 int memc_flexspi_reset(const struct device *dev)
211 {
212 	struct memc_flexspi_data *data = dev->data;
213 
214 	FLEXSPI_SoftwareReset(data->base);
215 
216 	return 0;
217 }
218 
/*
 * Execute a blocking IP-command transfer, compensating for the per-port
 * LUT sequence offset and the port's position in the device address map.
 * Returns 0 on success, -EIO on transfer failure.
 */
int memc_flexspi_transfer(const struct device *dev,
		flexspi_transfer_t *transfer)
{
	struct memc_flexspi_data *data = dev->data;
	flexspi_transfer_t adjusted;
	status_t status;
	uint32_t addr_shift = 0U;
	uint32_t seq_shift;

	/* This port's LUT entries may live at a nonzero offset within the
	 * shared LUT table, shifting every sequence index.
	 */
	seq_shift = data->port_luts[transfer->port].lut_offset /
				MEMC_FLEXSPI_CMD_PER_SEQ;
	/* Ports are mapped back to back in the device address space, so
	 * skip the sizes of all lower-numbered ports.
	 */
	for (int idx = 0; idx < transfer->port; idx++) {
		addr_shift += data->size[idx];
	}

	if ((seq_shift == 0) && (addr_shift == 0)) {
		/* No adjustment needed; issue the transfer as-is */
		status = FLEXSPI_TransferBlocking(data->base, transfer);
	} else {
		/* Work on a copy so the caller's transfer stays untouched */
		adjusted = *transfer;
		adjusted.seqIndex += seq_shift;
		adjusted.deviceAddress += addr_shift;
		status = FLEXSPI_TransferBlocking(data->base, &adjusted);
	}

	if (status != kStatus_Success) {
		LOG_ERR("Transfer error: %d", status);
		return -EIO;
	}

	return 0;
}
253 
/*
 * Translate a (port, offset) pair into a pointer within the FlexSPI
 * memory-mapped AHB window. Returns NULL for an invalid port.
 */
void *memc_flexspi_get_ahb_address(const struct device *dev,
		flexspi_port_t port, off_t offset)
{
	struct memc_flexspi_data *dev_data = dev->data;

	if (port >= kFLEXSPI_PortCount) {
		LOG_ERR("Invalid port number: %u", port);
		return NULL;
	}

	/* Ports occupy consecutive regions of the AHB window; skip past
	 * the regions of all lower-numbered ports.
	 */
	for (int idx = 0; idx < port; idx++) {
		offset += dev_data->size[idx];
	}

	return dev_data->ahb_base + offset;
}
271 
/*
 * Driver init: apply pinctrl, build the FlexSPI configuration from the
 * devicetree-derived fields in driver data, and initialize the module.
 * When running XIP from this instance, the per-port flash size registers
 * are saved across FLEXSPI_Init() and the module is re-enabled afterward
 * so that instruction fetches keep working.
 */
static int memc_flexspi_init(const struct device *dev)
{
	struct memc_flexspi_data *data = dev->data;
	flexspi_config_t flexspi_config;
	uint32_t flash_sizes[kFLEXSPI_PortCount];
	int ret;
	uint8_t i;

	/* we should not configure the device we are running on */
	if (memc_flexspi_is_running_xip(dev)) {
		if (!IS_ENABLED(CONFIG_MEMC_MCUX_FLEXSPI_INIT_XIP)) {
			LOG_DBG("XIP active on %s, skipping init", dev->name);
			return 0;
		}
	}
	/*
	 * SOCs such as the RT1064 and RT1024 have internal flash, and no pinmux
	 * settings, continue if no pinctrl state found.
	 */
	ret = pinctrl_apply_state(data->pincfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0 && ret != -ENOENT) {
		return ret;
	}

	FLEXSPI_GetDefaultConfig(&flexspi_config);

	flexspi_config.ahbConfig.enableAHBBufferable = data->ahb_bufferable;
	flexspi_config.ahbConfig.enableAHBCachable = data->ahb_cacheable;
	flexspi_config.ahbConfig.enableAHBPrefetch = data->ahb_prefetch;
	flexspi_config.ahbConfig.enableReadAddressOpt = data->ahb_read_addr_opt;
#if !(defined(FSL_FEATURE_FLEXSPI_HAS_NO_MCR0_COMBINATIONEN) && \
	FSL_FEATURE_FLEXSPI_HAS_NO_MCR0_COMBINATIONEN)
	flexspi_config.enableCombination = data->combination_mode;
#endif

#if !(defined(FSL_FEATURE_FLEXSPI_HAS_NO_MCR2_SCKBDIFFOPT) && \
	FSL_FEATURE_FLEXSPI_HAS_NO_MCR2_SCKBDIFFOPT)
	flexspi_config.enableSckBDiffOpt = data->sck_differential_clock;
#endif
	flexspi_config.rxSampleClock = data->rx_sample_clock;
#if defined(FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB) && \
FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB
	flexspi_config.rxSampleClockPortB = data->rx_sample_clock_b;
#if defined(FSL_FEATURE_FLEXSPI_SUPPORT_RXCLKSRC_DIFF) && \
	FSL_FEATURE_FLEXSPI_SUPPORT_RXCLKSRC_DIFF
	/* Ports A and B use different sample clocks; enable the diff option */
	if (flexspi_config.rxSampleClock != flexspi_config.rxSampleClockPortB) {
		flexspi_config.rxSampleClockDiff = true;
	}
#endif
#endif

	/* Configure AHB RX buffers, if any configuration settings are present */
	__ASSERT(data->buf_cfg_cnt < FSL_FEATURE_FLEXSPI_AHB_BUFFER_COUNT,
		"Maximum RX buffer configuration count exceeded");
	for (i = 0; i < data->buf_cfg_cnt; i++) {
		/* Should AHB prefetch up to buffer size? */
		flexspi_config.ahbConfig.buffer[i].enablePrefetch = data->buf_cfg[i].prefetch;
		/* AHB access priority (used for suspending control of AHB prefetching )*/
		flexspi_config.ahbConfig.buffer[i].priority = data->buf_cfg[i].priority;
		/* AHB master index, SOC specific */
		flexspi_config.ahbConfig.buffer[i].masterIndex = data->buf_cfg[i].master_id;
		/* RX buffer allocation (total available buffer space is instance/SOC specific) */
		flexspi_config.ahbConfig.buffer[i].bufferSize = data->buf_cfg[i].buf_size;
	}

	if (memc_flexspi_is_running_xip(dev)) {
		/* Save flash sizes- FlexSPI init will reset them */
		for (i = 0; i < kFLEXSPI_PortCount; i++) {
			flash_sizes[i] = data->base->FLSHCR0[i];
		}
	}

	FLEXSPI_Init(data->base, &flexspi_config);

#if defined(FLEXSPI_AHBCR_ALIGNMENT_MASK)
	/* Configure AHB alignment boundary */
	data->base->AHBCR = (data->base->AHBCR & ~FLEXSPI_AHBCR_ALIGNMENT_MASK) |
		FLEXSPI_AHBCR_ALIGNMENT(data->ahb_boundary);
#endif

	if (memc_flexspi_is_running_xip(dev)) {
		/* Restore flash sizes */
		for (i = 0; i < kFLEXSPI_PortCount; i++) {
			data->base->FLSHCR0[i] = flash_sizes[i];
		}

		/* Reenable FLEXSPI module */
		data->base->MCR0 &= ~FLEXSPI_MCR0_MDIS_MASK;
	}

	return 0;
}
364 
365 #ifdef CONFIG_PM_DEVICE
memc_flexspi_pm_action(const struct device * dev,enum pm_device_action action)366 static int memc_flexspi_pm_action(const struct device *dev, enum pm_device_action action)
367 {
368 	struct memc_flexspi_data *data = dev->data;
369 	int ret;
370 
371 	switch (action) {
372 	case PM_DEVICE_ACTION_RESUME:
373 		ret = pinctrl_apply_state(data->pincfg, PINCTRL_STATE_DEFAULT);
374 		if (ret < 0 && ret != -ENOENT) {
375 			return ret;
376 		}
377 		break;
378 	case PM_DEVICE_ACTION_SUSPEND:
379 		ret = pinctrl_apply_state(data->pincfg, PINCTRL_STATE_SLEEP);
380 		if (ret < 0 && ret != -ENOENT) {
381 			return ret;
382 		}
383 		break;
384 	default:
385 		return -ENOTSUP;
386 	}
387 
388 	return 0;
389 }
390 #endif
391 
/* Initializer fragment for the port B RX sample clock, emitted only on
 * SoCs whose FlexSPI supports a separate port B clock source.
 */
#if defined(FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB) && \
	FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB
#define MEMC_FLEXSPI_RXCLK_B(inst) .rx_sample_clock_b = DT_INST_PROP(inst, rx_clock_source_b),
#else
#define MEMC_FLEXSPI_RXCLK_B(inst)
#endif
398 
#if defined(CONFIG_XIP) && defined(CONFIG_FLASH_MCUX_FLEXSPI_XIP)
/* Checks if image flash base address is in the FlexSPI AHB base region
 * (reg index 1 of the node); evaluates to a compile-time boolean.
 */
#define MEMC_FLEXSPI_CFG_XIP(node_id)						\
	((CONFIG_FLASH_BASE_ADDRESS) >= DT_REG_ADDR_BY_IDX(node_id, 1)) &&	\
	((CONFIG_FLASH_BASE_ADDRESS) < (DT_REG_ADDR_BY_IDX(node_id, 1) +	\
					DT_REG_SIZE_BY_IDX(node_id, 1)))

#else
#define MEMC_FLEXSPI_CFG_XIP(node_id) false
#endif
409 
/* Per-instance definition: pinctrl state, RX buffer config array (cast to
 * struct memc_flexspi_buf_cfg, hence the packed layout), driver data from
 * devicetree properties, optional PM hooks, and device registration.
 */
#define MEMC_FLEXSPI(n)							\
	PINCTRL_DT_INST_DEFINE(n);					\
	static uint16_t  buf_cfg_##n[] =				\
		DT_INST_PROP_OR(n, rx_buffer_config, {0});		\
									\
	static struct memc_flexspi_data					\
		memc_flexspi_data_##n = {				\
		.base = (FLEXSPI_Type *) DT_INST_REG_ADDR(n),		\
		.xip = MEMC_FLEXSPI_CFG_XIP(DT_DRV_INST(n)),		\
		.ahb_base = (uint8_t *) DT_INST_REG_ADDR_BY_IDX(n, 1),	\
		.ahb_bufferable = DT_INST_PROP(n, ahb_bufferable),	\
		.ahb_cacheable = DT_INST_PROP(n, ahb_cacheable),	\
		.ahb_prefetch = DT_INST_PROP(n, ahb_prefetch),		\
		.ahb_read_addr_opt = DT_INST_PROP(n, ahb_read_addr_opt),\
		.ahb_boundary = DT_INST_ENUM_IDX(n, ahb_boundary),	\
		.combination_mode = DT_INST_PROP(n, combination_mode),	\
		.sck_differential_clock = DT_INST_PROP(n, sck_differential_clock),	\
		.rx_sample_clock = DT_INST_PROP(n, rx_clock_source),	\
		MEMC_FLEXSPI_RXCLK_B(n)                                 \
		.buf_cfg = (struct memc_flexspi_buf_cfg *)buf_cfg_##n,	\
		.buf_cfg_cnt = sizeof(buf_cfg_##n) /			\
			sizeof(struct memc_flexspi_buf_cfg),		\
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),		\
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),     \
		.clock_subsys = (clock_control_subsys_t)                \
			DT_INST_CLOCKS_CELL(n, name),                   \
	};								\
									\
	PM_DEVICE_DT_INST_DEFINE(n, memc_flexspi_pm_action);		\
									\
	DEVICE_DT_INST_DEFINE(n,					\
			      memc_flexspi_init,			\
			      PM_DEVICE_DT_INST_GET(n),			\
			      &memc_flexspi_data_##n,			\
			      NULL,					\
			      POST_KERNEL,				\
			      CONFIG_MEMC_MCUX_FLEXSPI_INIT_PRIORITY,	\
			      NULL);

DT_INST_FOREACH_STATUS_OKAY(MEMC_FLEXSPI)
450