1 /*
2  * Copyright (c) 2023 - 2024 Intel Corporation
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
#include <stddef.h>

#include "sedi_driver_pm.h"
#include "sedi_driver_core.h"
#include "sedi_driver_spi.h"
#include "sedi_driver_dma.h"
#include "sedi_spi_regs.h"
12 
#define SEDI_SPI_DRV_VERSION SEDI_DRIVER_VERSION_MAJOR_MINOR(1, 0)

/* IMR mask groups used when (un)masking interrupt sources:
 * error conditions (TX overflow, RX underflow, RX overflow),
 * TX FIFO empty, RX FIFO full, and "no interrupts".
 */
#define REG_INT_ERROR  \
	(SEDI_RBFVM(SPI, IMR, TXOIM, UNMASKED) | \
	 SEDI_RBFVM(SPI, IMR, RXUIM, UNMASKED) | \
	 SEDI_RBFVM(SPI, IMR, RXOIM, UNMASKED))
#define REG_INT_TX	\
	(SEDI_RBFVM(SPI, IMR, TXEIM, UNMASKED))
#define REG_INT_RX	\
	(SEDI_RBFVM(SPI, IMR, RXFIM, UNMASKED))
#define REG_INT_NONE (0)

/* Frame sizes in bytes; SPI_RECEIVE_MODE_MAX_SIZE is the largest single
 * RX-only transfer (presumably bounded by the 16-bit CTRLR1 frame counter
 * — confirm against the register definition).
 */
#define SPI_FRAME_SIZE_1_BYTE     (1)
#define SPI_FRAME_SIZE_2_BYTES    (2)
#define SPI_RECEIVE_MODE_MAX_SIZE (65536)
/* SSI controller input clock, queried from the PM driver at runtime */
#define SSI_IC_FREQ               (sedi_pm_get_lbw_clock())

/* The DFS register field stores (bits - 1); convert to bit widths */
#define SPI_BITWIDTH_4BITS  (SEDI_RBFV(SPI, CTRLR0, DFS, FRAME_04BITS) + 1)
#define SPI_BITWIDTH_8BITS  (SEDI_RBFV(SPI, CTRLR0, DFS, FRAME_08BITS) + 1)
#define SPI_BITWIDTH_16BITS (SEDI_RBFV(SPI, CTRLR0, DFS, FRAME_16BITS) + 1)

/* Add easy usage for SSI Clock Divider */
SEDI_RBFV_DEFINE(SPI, BAUDR, SCKDV, MIN_PRESCALE, 0x2);
SEDI_RBFV_DEFINE(SPI, BAUDR, SCKDV, DEFAULT_PRESCALE, 0x14);

#ifdef SPI_DW_2_0
/* ********* SPI SPI_CTRLR0 ***********
 * SPI Control Register is valid only when SSI_SPI_MODE is either set to
 * "Dual" or "Quad" or "Octal" mode
 */
SEDI_REG_DEFINE(SPI, SPI_CTRLR0, 0xf4, RW, (uint32_t)0x7fb3f, (uint32_t)0x200);
SEDI_RBF_DEFINE(SPI, SPI_CTRLR0, ADDR_L, 2, 4, RW, (uint32_t)0x0);
SEDI_RBF_DEFINE(SPI, SPI_CTRLR0, INST_L, 8, 2, RW, (uint32_t)0x2);
SEDI_RBF_DEFINE(SPI, SPI_CTRLR0, WAIT_CYCLES, 11, 5, RW, (uint32_t)0x0);
/* Notice: there are several specific registers offset of RBF for SPI_DW_2_0
 * List TFT as a example
 */
SEDI_RBF_DEFINE(SPI, TXFTLR, TFT, 0, 16, RW, (uint32_t)0x0);
#endif

/* Bounded busy-wait on _cond with a limit of 100 (unit defined by
 * SEDI_POLL_WAIT — verify in sedi_driver_core.h).
 */
#define SEDI_SPI_POLL_WAIT(_cond) SEDI_POLL_WAIT((_cond), 100)
54 
/* Per-instance driver state; one entry per SPI controller. */
struct spi_context {
	/* hardware config */
	sedi_spi_regs_t *base;      /* MMIO register base */
	uint32_t dma_handshake;     /* DMA hardware handshake id for TX */
	uint32_t rx_handshake;      /* DMA hardware handshake id for RX */

	/* sedi required */
	sedi_spi_capabilities_t capability;
	sedi_spi_status_t status;
	sedi_spi_event_cb_t cb_event; /* user event callback */
	void *cb_param;               /* opaque argument for cb_event */

	/* ioctl info */
	uint8_t frame_size; /* Frame size in byte */
	uint8_t tx_watermark; /* TX FIFO threshold, in frames */
	uint8_t rx_watermark; /* RX FIFO threshold, in frames */
	uint32_t prescale;    /* BAUDR clock divider currently programmed */
	uint32_t dummy_data;  /* Filler frame sent when no TX buffer given */
	bool is_lsb;          /* true: LSB-first (software bit reversal) */
	bool is_cs_continuous; /* keep CS asserted across buffer sets */

	/* transfer info */
	uint8_t transfer_mode; /* cached CTRLR0.TMOD value */
	uint8_t *data_tx;      /* TX buffer (NULL for RX-only) */
	uint8_t *data_rx;      /* RX buffer (NULL for TX-only) */
	uint32_t tx_data_len;  /* total TX length, bytes */
	uint32_t rx_data_len;  /* total RX length, bytes */
	uint32_t data_tx_idx;  /* TX progress, bytes */
	uint32_t data_rx_idx;  /* RX progress, bytes */

	/* For dma transfer */
	bool dma_tx_finished;
	bool dma_rx_finished;
	uint32_t tx_dma;     /* DMA device used for TX */
	uint32_t rx_dma;     /* DMA device used for RX */
	uint8_t tx_channel;  /* DMA channel used for TX */
	uint8_t rx_channel;  /* DMA channel used for RX */
	uint32_t dma_cycles; /* For large data DMA transfer */
	uint32_t dma_idx; /* For large data DMA transfer */
	uint32_t last_dma_counts; /* For large data DMA transfer */

	/* For qspi */
	bool is_quad;
	const sedi_spi_enhanced_config_t *quad_config;
};
100 
/* Version info reported by sedi_spi_get_version() */
static const sedi_driver_version_t driver_version = {SEDI_SPI_API_VERSION,
						     SEDI_SPI_DRV_VERSION};

/* Populated on demand in sedi_spi_get_capabilities() */
static sedi_spi_capabilities_t driver_capabilities[SEDI_SPI_NUM] = {0};

/* Bind instance x to its register base and TX/RX DMA handshake ids */
#define SPI_CONTEXT_INIT(x)                                                                        \
	{                                                                                          \
		.base = (sedi_spi_regs_t *)SEDI_IREG_BASE(SPI, x),                                 \
		.dma_handshake = DMA_HWID_SPI##x##_TX, .dummy_data = 0x00,                         \
		.rx_handshake = DMA_HWID_SPI##x##_RX                                               \
	}

static struct spi_context spi_contexts[SEDI_SPI_NUM] = { SPI_CONTEXT_INIT(0), SPI_CONTEXT_INIT(1) };
114 
/* bit_reverse_table[b] is byte b with its bit order reversed (MSB<->LSB);
 * used to emulate LSB-first transfers on MSB-first hardware.
 */
static const uint8_t bit_reverse_table[] = {
	0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0,
	0x30, 0xB0, 0x70, 0xF0, 0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8,
	0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8, 0x04, 0x84, 0x44, 0xC4,
	0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
	0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC,
	0x3C, 0xBC, 0x7C, 0xFC, 0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2,
	0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2, 0x0A, 0x8A, 0x4A, 0xCA,
	0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
	0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6,
	0x36, 0xB6, 0x76, 0xF6, 0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE,
	0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE, 0x01, 0x81, 0x41, 0xC1,
	0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
	0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9,
	0x39, 0xB9, 0x79, 0xF9, 0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5,
	0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5, 0x0D, 0x8D, 0x4D, 0xCD,
	0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
	0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3,
	0x33, 0xB3, 0x73, 0xF3, 0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB,
	0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB, 0x07, 0x87, 0x47, 0xC7,
	0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
	0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF,
	0x3F, 0xBF, 0x7F, 0xFF
};
139 
/* Reverse the bit order of each byte in val[0..len) in place. */
static void msb_lsb_convert_8bits(uint8_t *val, uint32_t len)
{
	uint8_t *p = val;
	const uint8_t *end = val + len;

	while (p < end) {
		*p = bit_reverse_table[*p];
		p++;
	}
}
150 
/* Reverse the bit order of each 16-bit frame in val[0..len) in place
 * (both bytes are reversed and swapped).
 */
static void msb_lsb_convert_16bits(uint16_t *val, uint32_t len)
{
	for (uint32_t i = 0; i < len; i++) {
		uint16_t frame = val[i];
		uint16_t lo_rev = bit_reverse_table[frame & 0xFF];
		uint16_t hi_rev = bit_reverse_table[(frame >> 8U) & 0xFF];

		val[i] = (uint16_t)((lo_rev << 8U) | hi_rev);
	}
}
162 
/* Enable or disable the SSI controller, masking interrupts first and
 * polling (bounded) until SSIENR reflects the requested state.
 */
static inline void lld_spi_enable(sedi_spi_regs_t *spi, bool enable)
{
	/* Target SSIENR.SSI_EN value */
	uint32_t val = enable ? SEDI_RBFV(SPI, SSIENR, SSI_EN, ENABLED) :
			SEDI_RBFV(SPI, SSIENR, SSI_EN, DISABLE);

	/* Already in the requested state: nothing to do */
	if (SEDI_PREG_RBFV_GET(SPI, SSIENR, SSI_EN, &spi->ssienr) == val) {
		return;
	}
	/* prevent pending interrupt */
	spi->imr = 0;

	SEDI_PREG_RBF_SET(SPI, SSIENR, SSI_EN, val, &spi->ssienr);

	/* Wait until the hardware acknowledges the state change */
	SEDI_SPI_POLL_WAIT(SEDI_PREG_RBFV_GET(SPI, SSIENR, SSI_EN, &spi->ssienr) != val);
}
178 
/* Enable or disable both TX and RX DMA requests in DMACR. */
static inline void lld_spi_dma_enable(sedi_spi_regs_t *spi, bool enable)
{
	if (enable) {
		SEDI_PREG_RBFV_SET(SPI, DMACR, TDMAE, ENABLED, &spi->dmacr);
		SEDI_PREG_RBFV_SET(SPI, DMACR, RDMAE, ENABLED, &spi->dmacr);
	} else {
		SEDI_PREG_RBFV_SET(SPI, DMACR, TDMAE, DISABLE, &spi->dmacr);
		SEDI_PREG_RBFV_SET(SPI, DMACR, RDMAE, DISABLE, &spi->dmacr);
	}
}
189 
/* Program the interrupt mask register; mask is a REG_INT_* combination. */
static inline void lld_spi_config_interrupt(sedi_spi_regs_t *spi, uint32_t mask)
{
	SEDI_PREG_SET(SPI, IMR, mask, &spi->imr);
}
194 
lld_spi_is_busy(sedi_spi_regs_t * spi)195 static inline bool lld_spi_is_busy(sedi_spi_regs_t *spi)
196 {
197 	return (SEDI_PREG_RBFV_IS_SET(SPI, SR, BUSY, ACTIVE, &spi->sr)) ||
198 			(SEDI_PREG_RBFV_IS_SET(SPI, SR, TFE, NOT_EMPTY, &spi->sr)) ? true : false;
199 }
200 
lld_spi_is_enabled(sedi_spi_regs_t * spi)201 static inline bool lld_spi_is_enabled(sedi_spi_regs_t *spi)
202 {
203 	return SEDI_PREG_RBFV_GET(SPI, SSIENR, SSI_EN, &spi->ssienr) ? true : false;
204 }
205 
/* Snapshot ISR, then clear all latched interrupts — these status
 * registers are clear-on-read, so the reads are performed purely for
 * their side effect. Returns the ISR value sampled before clearing.
 */
static inline uint32_t lld_spi_interrupt_clear(sedi_spi_regs_t *spi)
{
	uint32_t tmp;
	uint32_t isr;

	/* tmp only exists to hold the discarded clear-on-read values */
	PARAM_UNUSED(tmp);

	isr = spi->isr;
	tmp = SEDI_PREG_RBFV_GET(SPI, ICR, ICR, &spi->icr);

	/* Clear all error interrupt by read*/
	tmp = SEDI_PREG_RBFV_GET(SPI, TXOICR, TXOICR, &spi->txoicr);
	tmp = SEDI_PREG_RBFV_GET(SPI, RXOICR, RXOICR, &spi->rxoicr);
	tmp = SEDI_PREG_RBFV_GET(SPI, RXUICR, RXUICR, &spi->rxuicr);

	return isr;

}
224 
/* Put the controller into a sane default state: half-depth FIFO
 * watermarks, 8-bit frames, normal (non-loopback) mode, default clock
 * divider, slave 0 selected. Also mirrors these defaults into the
 * driver context. Always returns 0.
 */
static int lld_spi_default_config(sedi_spi_t spi_device)
{
	struct spi_context *context = &spi_contexts[spi_device];
	sedi_spi_regs_t *spi = context->base;

	/* Threshold register value; actual watermark is this + 1 frames */
	uint32_t watermark = SPI_FIFO_DEPTH / 2 - 1;

	uint8_t loopback = SEDI_RBFV(SPI, CTRLR0, SRL, NORMAL_MODE);
	/* DFS: Data Frame size only valid when SSI_MAX_XFER_SIZE is configured to
	 * 16, if SSI_MAX_XFER_SIZE is configured to 32, then writing to this field
	 * will not have any effect
	 * DFS_32: only valid when SSI_MAX_XFER_SIZE is configured to 32
	 */
	uint8_t width = SEDI_RBFV(SPI, CTRLR0, DFS_32, FRAME_08BITS);
	uint8_t cs_mask = SEDI_RBFV(SPI, SER, SER, SELECTED);
	uint32_t prescale = SEDI_RBFV(SPI, BAUDR, SCKDV, DEFAULT_PRESCALE);

	/* Disable SPI first */
	lld_spi_enable(spi, false);

	/* Set default SPI watermark */
	SEDI_PREG_RBF_SET(SPI, TXFTLR, TFT, watermark, &spi->txftlr);
	SEDI_PREG_RBF_SET(SPI, RXFTLR, RFT, watermark, &spi->rxftlr);
	SEDI_PREG_RBF_SET(SPI, DMATDLR, DMATDL, watermark, &spi->dmatdlr);
	SEDI_PREG_RBF_SET(SPI, DMARDLR, DMARDL, watermark, &spi->dmardlr);

	SEDI_PREG_RBF_SET(SPI, CTRLR0, DFS_32, width, &spi->ctrlr0);
	SEDI_PREG_RBF_SET(SPI, CTRLR0, SRL, loopback, &spi->ctrlr0);
	SEDI_PREG_RBF_SET(SPI, CTRLR0, SSTE, 0, &spi->ctrlr0);
	SEDI_PREG_RBF_SET(SPI, BAUDR, SCKDV, prescale, &spi->baudr);
	SEDI_PREG_RBF_SET(SPI, SER, SER, cs_mask, &spi->ser);

	/* Update context default settings */
	context->tx_watermark = watermark + 1U;
	context->rx_watermark = watermark + 1U;
	context->prescale = prescale;
	context->frame_size = 1U;
	context->is_lsb = false;

	return 0;

}
267 
/* Program clock polarity (CPOL) and phase (CPHA). Non-zero arguments are
 * treated as true and translated to the matching register field values.
 *
 * Bug fix: the previous code wrote the SCPOL-encoded value into the SCPH
 * field and the SCPH-encoded value into SCPOL (fields swapped), which
 * mis-programs SPI modes 1 and 2. Each value now goes to its own field.
 */
static inline void lld_spi_config_cpol_cpha(sedi_spi_regs_t *spi, int cpol, int cpha)
{
	cpol = cpol ? SEDI_RBFV(SPI, CTRLR0, SCPOL, SCLK_HIGH) :
			SEDI_RBFV(SPI, CTRLR0, SCPOL, SCLK_LOW);
	cpha = cpha ? SEDI_RBFV(SPI, CTRLR0, SCPH, SCPH_START) :
			SEDI_RBFV(SPI, CTRLR0, SCPH, SCPH_MIDDLE);

	SEDI_PREG_RBF_SET(SPI, CTRLR0, SCPH, cpha, &spi->ctrlr0);
	SEDI_PREG_RBF_SET(SPI, CTRLR0, SCPOL, cpol, &spi->ctrlr0);
}
278 
/* Switch the shift-register loopback (test) mode on or off. */
static inline void lld_spi_config_loopback(sedi_spi_regs_t *spi, int loopback)
{
	int srl_mode = loopback ? SEDI_RBFV(SPI, CTRLR0, SRL, TESTING_MODE)
				: SEDI_RBFV(SPI, CTRLR0, SRL, NORMAL_MODE);

	SEDI_PREG_RBF_SET(SPI, CTRLR0, SRL, srl_mode, &spi->ctrlr0);
}
285 
/* Set the BAUDR clock divider: SCLK = controller clock / prescale. */
static inline void lld_spi_config_prescale(sedi_spi_regs_t *spi, uint32_t prescale)
{
	SEDI_PREG_RBF_SET(SPI, BAUDR, SCKDV, prescale, &spi->baudr);
}
290 
/* Set the data frame size. width is in bits; the hardware field stores
 * (width - 1).
 */
static inline void lld_spi_config_width(sedi_spi_regs_t *spi, uint8_t width)
{
	/* DFS: Data Frame size only valid when SSI_MAX_XFER_SIZE is configured to
	 * 16, if SSI_MAX_XFER_SIZE is configured to 32, then writing to this field
	 * will not have any effect
	 * DFS_32: only valid when SSI_MAX_XFER_SIZE is configured to 32
	 */
	SEDI_PREG_RBF_SET(SPI, CTRLR0, DFS_32, width - 1, &spi->ctrlr0);
}
300 
/* Set the TX FIFO threshold. watermark is in frames and must be >= 1;
 * the register stores (watermark - 1).
 */
static inline void lld_spi_set_tx_watermark(sedi_spi_regs_t *spi, uint32_t watermark)
{
	SEDI_PREG_RBF_SET(SPI, TXFTLR, TFT, watermark - 1, &spi->txftlr);
}
305 
/* Set the RX FIFO threshold. watermark is in frames and must be >= 1;
 * the register stores (watermark - 1).
 */
static inline void lld_spi_set_rx_watermark(sedi_spi_regs_t *spi, uint32_t watermark)
{
	SEDI_PREG_RBF_SET(SPI, RXFTLR, RFT, watermark - 1, &spi->rxftlr);
}
310 
/* Program the slave-select enable register; cs_mask is a bitmask of the
 * chip-select lines to drive (0 deselects all).
 */
static inline void lld_spi_config_cs(sedi_spi_regs_t *spi, uint32_t cs_mask)
{
	SEDI_PREG_RBF_SET(SPI, SER, SER, cs_mask, &spi->ser);
}
315 
/* Derive CTRLR0.TMOD from which buffers the caller supplied (TX-only,
 * RX-only, or full duplex) and cache the mode in the context so the
 * transfer paths can branch on it.
 */
static void lld_spi_set_transfer_mode(sedi_spi_t spi_device,
				      IN uint8_t *data_out,
				      OUT uint8_t *data_in)
{
	struct spi_context *context = &spi_contexts[spi_device];
	sedi_spi_regs_t *spi = context->base;

	if (data_out == NULL) {
		/* Set to receive only mode */
		SEDI_PREG_RBFV_SET(SPI, CTRLR0, TMOD, RX_ONLY, &spi->ctrlr0);
		context->transfer_mode = SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY);
	} else if (data_in == NULL) {
		/* Set to transmit only mode */
		SEDI_PREG_RBFV_SET(SPI, CTRLR0, TMOD, TX_ONLY, &spi->ctrlr0);
		context->transfer_mode = SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY);
	} else {
		/* Both buffers present: full-duplex transfer */
		SEDI_PREG_RBFV_SET(SPI, CTRLR0, TMOD, TX_AND_RX, &spi->ctrlr0);
		context->transfer_mode = SEDI_RBFV(SPI, CTRLR0, TMOD, TX_AND_RX);
	}

}
337 
/* Push up to count bytes into the TX FIFO from buff; when buff is NULL
 * the configured dummy_data frame is sent instead (RX-driven clocking).
 * count and the return value are in bytes; the return value is the
 * number actually queued this call.
 *
 * In enhanced (quad) mode the instruction and address words are written
 * first (one FIFO entry each); the quad state is then dropped so the
 * payload is handled like a standard transfer.
 */
static int lld_spi_fill_fifo(sedi_spi_t spi_device, uint8_t frame_size,
			     IN uint8_t *buff, uint32_t count)
{
	struct spi_context *context = &spi_contexts[spi_device];
	sedi_spi_regs_t *spi = context->base;
	uint32_t size = 0;
	uint32_t data = 0;

	/* In quad mode, need to send opcode and addr first */
	if ((context->is_quad) && (context->quad_config)) {
		const sedi_spi_enhanced_config_t *config = context->quad_config;

		/* Instruction need 1 entry */
		SEDI_PREG_SET(SPI, DR0, *(config->inst_buf), &spi->dr0);
		/* NOTE(review): assumes addr_buf holds >= 4 bytes and is
		 * suitably aligned for a 32-bit read — confirm with callers.
		 */
		SEDI_PREG_SET(SPI, DR0, *((uint32_t *)(config->addr_buf)), &spi->dr0);
		/* After fill in addr and instruction, no need to keep quad state,
		just transfer data as standard SPI */
		context->is_quad = false;
		context->quad_config = NULL;
		if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) {
			return 0;
		}
	}

	/* Check how many empty entry in FIFO */
	size = (SPI_FIFO_DEPTH - spi->txflr) * frame_size;

	/* Get the number which can be filled to fifo */
	size = (count > size) ? size : count;
	/* Used to return the actual fill size in bytes */
	count = size;
	while (size) {
		/* Get the data in a FIFO entry */
		if (buff) {
			switch (frame_size) {
			case SPI_FRAME_SIZE_1_BYTE:
				data = (uint32_t)(*buff);
				break;
			case SPI_FRAME_SIZE_2_BYTES:
				data = (uint32_t)(*(uint16_t *)buff);
				break;
			default:
				break;
			}
			/* Update size */
			buff += frame_size;
		} else {
			data = context->dummy_data;
		}
		/* Write data */
		SEDI_PREG_SET(SPI, DR0, data, &spi->dr0);
		size -= frame_size;
	}

	return count;
}
394 
/* Drain up to count bytes from the RX FIFO into buff; when buff is NULL
 * the frames are read and discarded. count and the return value are in
 * bytes; the return value is the number actually consumed this call.
 */
static int lld_spi_receive_fifo(sedi_spi_regs_t *spi, uint8_t frame_size,
				OUT uint8_t *buff, uint32_t count)
{
	/* Bytes currently available in the RX FIFO */
	uint32_t size = spi->rxflr * frame_size;
	uint32_t data;

	/* Get the number which can be filled to fifo */
	size = (count > size) ? size : count;
	/* Used to return the actual fill size in bytes */
	count = size;
	while (size) {
		/* Get the data in a FIFO entry */
		data = SEDI_PREG_GET(SPI, DR0, &spi->dr0);
		if (buff) {
			switch (frame_size) {
			case SPI_FRAME_SIZE_1_BYTE:
				*buff = (data & 0xFF);
				break;
			case SPI_FRAME_SIZE_2_BYTES:
				/* Store little-endian: low byte first */
				buff[0] = (data & 0xFF);
				buff[1] = ((data >> 8U) & 0xFF);
				break;
			default:
				break;
			}
			/* Update size and buff */
			buff += frame_size;
		}
		size -= frame_size;
	}

	return count;
}
428 
lld_spi_dr_address(sedi_spi_regs_t * spi)429 static inline uint32_t lld_spi_dr_address(sedi_spi_regs_t *spi)
430 {
431 #ifdef SEDI_SPI_USE_DMA
432 	uint32_t ret = SEDI_SPI_0_REG_DMA + (uint32_t)&((sedi_spi_regs_t *)0)->dr0;
433 	return ret;
434 #else
435 	return (uint32_t)&spi->dr0;
436 #endif
437 }
438 
/* Reverse the bit order of every frame in a buffer of len bytes,
 * dispatching on the frame size (1 or 2 bytes).
 */
static void spi_bit_reverse(uint8_t *val, uint32_t len, uint8_t frame_size)
{
	if (frame_size != SPI_FRAME_SIZE_1_BYTE) {
		/* 16-bit helper takes a frame count, not a byte count */
		msb_lsb_convert_16bits((uint16_t *)val, len / frame_size);
	} else {
		msb_lsb_convert_8bits(val, len);
	}
}
447 
/* Select the Texas Instruments SSP frame format. The controller must be
 * disabled before CTRLR0.FRF can be changed.
 */
static void lld_spi_set_ti_mode(sedi_spi_regs_t *spi)
{
	if (lld_spi_is_enabled(spi)) {
		lld_spi_enable(spi, false);
	}

	SEDI_PREG_RBFV_SET(SPI, CTRLR0, FRF, TEXAS_SSP, &spi->ctrlr0);
}
455 
/* Select the National Semiconductor Microwire frame format and program
 * its handshake, data-direction and sequential-mode options from config.
 * The controller must be disabled before CTRLR0.FRF can be changed.
 */
static void lld_spi_set_microwire_mode(sedi_spi_regs_t *spi,
				       sedi_spi_microwire_config_t *config)
{
	if (lld_spi_is_enabled(spi) == true) {
		lld_spi_enable(spi, false);
	}

	SEDI_PREG_RBFV_SET(SPI, CTRLR0, FRF, NS_MICROWIRE, &spi->ctrlr0);

	/* Configure microwire mode */
	SEDI_PREG_RBF_SET(SPI, MWCR, MHS, config->microwire_handshake, &spi->mwcr);
	SEDI_PREG_RBF_SET(SPI, MWCR, MDD, config->data_direction_tx, &spi->mwcr);
	SEDI_PREG_RBF_SET(SPI, MWCR, MWMOD, config->sequential_mode, &spi->mwcr);
}
470 
/* Select the SPI line mode (single/dual/quad/octal). Only available on
 * DW 2.0 hardware; on other builds this is a no-op because SPI_FRF is
 * read-only.
 */
static void lld_spi_set_line_mode(sedi_spi_regs_t *spi, spi_line_mode_t mode)
{
	/* SPI_FRF: SPI Frame Format Bits RO and only valid when SSI_SPI_MODE is
	 * either set to "Dual" or "Quad" or "Octal" mode, so add #ifdef SPI_DW_2_0
	 */
#ifdef SPI_DW_2_0
	/* Controller must be disabled before CTRLR0 can be changed */
	lld_spi_enable(spi, false);

	SEDI_PREG_RBF_SET(SPI, CTRLR0, SPI_FRF, mode, &spi->ctrlr0);
#endif
}
482 
483 #ifdef SPI_DW_2_0
/* Tune the TX start threshold so transmission begins only once (up to)
 * the whole transfer — or a full FIFO — has been queued, improving bus
 * efficiency. num is the transfer length in bytes.
 */
static void dw_spi_set_start_condition(struct spi_context *context, uint32_t num)
{
	sedi_spi_regs_t *spi = context->base;
	uint32_t start_frame = 0;

	/* Set the send start condition to improve efficiency */
	if (context->quad_config) {
		/* enhanced mode includes 2 frames for opcode and addr */
		start_frame = num / (context->frame_size) + 2;
	} else {
		start_frame = num / (context->frame_size);
	}

	/* Compare with FIFO depth */
	if (start_frame < SPI_FIFO_DEPTH) {
		/* NOTE(review): if num < frame_size this computes 0 - 1 and
		 * underflows the threshold field — confirm callers always
		 * pass at least one frame.
		 */
		SEDI_PREG_RBF_SET(SPI, TXFTLR, TFT, start_frame - 1, &spi->txftlr);
	} else {
		SEDI_PREG_RBF_SET(SPI, TXFTLR, TFT, SPI_FIFO_DEPTH - 1, &spi->txftlr);
	}
}
504 
505 #endif
506 /******************************************************************************
507  * SEDI interface
508  *****************************************************************************/
509 
/* Return the driver's API and implementation version pair. */
sedi_driver_version_t sedi_spi_get_version(void)
{
	return driver_version;
}
514 
sedi_spi_get_capabilities(IN sedi_spi_t spi_device,sedi_spi_capabilities_t * cap)515 int32_t sedi_spi_get_capabilities(IN sedi_spi_t spi_device,
516 				  sedi_spi_capabilities_t *cap)
517 {
518 	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
519 
520 	if (sedi_dev_is_self_owned(SEDI_DEVID_SPI0 + spi_device)) {
521 		driver_capabilities[spi_device].is_available = 1;
522 	} else {
523 		driver_capabilities[spi_device].is_available = 0;
524 	}
525 
526 	driver_capabilities[spi_device].ti_ssi = 1;
527 	driver_capabilities[spi_device].microwire = 1;
528 	driver_capabilities[spi_device].mode_fault = 0;
529 
530 	*cap = driver_capabilities[spi_device];
531 
532 	return SEDI_DRIVER_OK;
533 }
534 
sedi_spi_init(IN sedi_spi_t spi_device,IN sedi_spi_event_cb_t cb_event,INOUT void * param,IN uint32_t base)535 int32_t sedi_spi_init(IN sedi_spi_t spi_device, IN sedi_spi_event_cb_t cb_event,
536 		      INOUT void *param, IN uint32_t base)
537 {
538 	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
539 
540 	struct spi_context *context = &spi_contexts[spi_device];
541 
542 	if (!sedi_dev_is_self_owned(SEDI_DEVID_SPI0 + spi_device)) {
543 		return SEDI_DRIVER_ERROR_NO_DEV;
544 	}
545 
546 	context->cb_event = cb_event;
547 	context->cb_param = param;
548 
549 	context->base = (sedi_spi_regs_t *)base;
550 
551 	return SEDI_DRIVER_OK;
552 }
553 
sedi_spi_uninit(IN sedi_spi_t spi_device)554 int32_t sedi_spi_uninit(IN sedi_spi_t spi_device)
555 {
556 	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
557 
558 	struct spi_context *context = &spi_contexts[spi_device];
559 
560 	context->cb_event = NULL;
561 	context->is_lsb = false;
562 
563 	return SEDI_DRIVER_OK;
564 }
565 
/* Transition an SPI instance between power states. Full power re-applies
 * the hardware default configuration; suspend/low states disable the
 * controller before dropping power. SEDI_POWER_OFF is not supported.
 */
int32_t sedi_spi_set_power(IN sedi_spi_t spi_device,
			   IN sedi_power_state_t state)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	sedi_devid_t id = SEDI_DEVID_SPI0 + spi_device;
	struct spi_context *context = &spi_contexts[spi_device];
	int32_t ret = SEDI_DRIVER_OK;

	switch (state) {
	case SEDI_POWER_FULL:
		/* Power up first, then restore the default register setup */
		sedi_pm_set_device_power(id, state);
		lld_spi_default_config(spi_device);
		break;
	case SEDI_POWER_SUSPEND:
	case SEDI_POWER_FORCE_SUSPEND:
	case SEDI_POWER_LOW:
		/* Quiesce the controller before removing power */
		lld_spi_enable(context->base, false);
		sedi_pm_set_device_power(id, state);
		break;
	case SEDI_POWER_OFF:
	default:
		ret = SEDI_DRIVER_ERROR_UNSUPPORTED;
		break;
	}

	return ret;
}
593 
sedi_spi_get_data_count(IN sedi_spi_t spi_device)594 int32_t sedi_spi_get_data_count(IN sedi_spi_t spi_device)
595 {
596 	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
597 
598 	struct spi_context *context = &spi_contexts[spi_device];
599 
600 	if (context->data_tx) {
601 		return context->data_tx_idx;
602 	} else {
603 		return context->data_rx_idx;
604 	}
605 }
606 
/* Fill *status with a snapshot of the driver-tracked state (busy,
 * data-lost, mode-fault flags) and live hardware registers (ISR, SR,
 * FIFO levels).
 */
int32_t sedi_spi_get_status(IN sedi_spi_t spi_device, sedi_spi_status_t *status)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(NULL != status, SEDI_DRIVER_ERROR_PARAMETER);

	struct spi_context *context = &spi_contexts[spi_device];
	sedi_spi_regs_t *reg = context->base;

	/* Software-maintained flags */
	status->busy = context->status.busy;
	status->data_lost = context->status.data_lost;
	status->mode_fault = context->status.mode_fault;
	/* Raw hardware state */
	status->isr = reg->isr;
	status->sr = reg->sr;
	status->txflr = reg->txflr;
	status->rxflr = reg->rxflr;

	return SEDI_DRIVER_OK;
}
625 
/* Runtime configuration entry point (ioctl style). control selects the
 * operation; arg is either a plain value, a pointer to a config struct,
 * or a pointer to an output location depending on the operation.
 * Unknown control codes are silently ignored; always returns
 * SEDI_DRIVER_OK on a valid device/control pair.
 */
int32_t sedi_spi_control(IN sedi_spi_t spi_device, IN uint32_t control,
			 IN uint32_t arg)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(control < SEDI_SPI_IOCTL_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	struct spi_context *context = &spi_contexts[spi_device];

	switch (control) {
	case SEDI_SPI_IOCTL_CPOL0_CPHA0:
		lld_spi_config_cpol_cpha(context->base, 0, 0);
		break;
	case SEDI_SPI_IOCTL_CPOL0_CPHA1:
		lld_spi_config_cpol_cpha(context->base, 0, 1);
		break;
	case SEDI_SPI_IOCTL_CPOL1_CPHA0:
		lld_spi_config_cpol_cpha(context->base, 1, 0);
		break;
	case SEDI_SPI_IOCTL_CPOL1_CPHA1:
		lld_spi_config_cpol_cpha(context->base, 1, 1);
		break;
	case SEDI_SPI_IOCTL_TI_SSI:
		lld_spi_set_ti_mode(context->base);
		break;
	case SEDI_SPI_IOCTL_MICROWIRE:
		/* arg is a pointer to the microwire configuration */
		lld_spi_set_microwire_mode(context->base,
					   (sedi_spi_microwire_config_t *)arg);
		break;
	case SEDI_SPI_IOCTL_MSB:
		context->is_lsb = false;
		break;
	case SEDI_SPI_IOCTL_LSB:
		context->is_lsb = true;
		break;
	case SEDI_SPI_IOCTL_DATA_WIDTH:
		/* arg is the frame width in bits (4, 8 or 16) */
		DBG_CHECK(((arg == SPI_BITWIDTH_4BITS) ||
			   (arg == SPI_BITWIDTH_8BITS) ||
			   (arg == SPI_BITWIDTH_16BITS)),
			  SEDI_DRIVER_ERROR_PARAMETER);
		context->frame_size = (uint8_t)arg / 8U;
		/* For 4 bits operation, operate like 8 bits */
		if (context->frame_size == 0) {
			context->frame_size = SPI_FRAME_SIZE_1_BYTE;
		}
		lld_spi_config_width(context->base, (uint8_t)arg);
		break;
	case SEDI_SPI_IOCTL_CS_UNUSED:
		lld_spi_config_cs(context->base, 0U);
		break;
	case SEDI_SPI_IOCTL_CS_HW:
		lld_spi_config_cs(context->base, (uint32_t)arg);
		break;
	case SEDI_SPI_IOCTL_SPEED_SET:
		/* arg is the requested SCLK in Hz; clamp the divider */
		context->prescale = SSI_IC_FREQ / (uint32_t)arg;
		if (context->prescale < SEDI_RBFV(SPI, BAUDR, SCKDV, MIN_PRESCALE)) {
			context->prescale = SEDI_RBFV(SPI, BAUDR, SCKDV, MIN_PRESCALE);
		}
		lld_spi_config_prescale(context->base, context->prescale);
		break;
	case SEDI_SPI_IOCTL_TX_WATERMARK_SET:
		context->tx_watermark = (uint32_t)arg;
		lld_spi_set_tx_watermark(context->base, (uint32_t)arg);
		break;
	case SEDI_SPI_IOCTL_RX_WATERMARK_SET:
		context->rx_watermark = (uint32_t)arg;
		lld_spi_set_rx_watermark(context->base, (uint32_t)arg);
		break;
	case SEDI_SPI_IOCTL_DUMMY_DATA:
		context->dummy_data = (uint32_t)arg;
		break;
	case SEDI_SPI_IOCTL_LOOPBACK:
		lld_spi_config_loopback(context->base, (bool)arg);
		break;
	case SEDI_SPI_IOCTL_SPEED_GET:
		/* arg is an output pointer for the getters below */
		*((uint32_t *)arg) = SSI_IC_FREQ / context->prescale;
		break;
	case SEDI_SPI_IOCTL_TX_WATERMARK_GET:
		*((uint32_t *)arg) = context->tx_watermark;
		break;
	case SEDI_SPI_IOCTL_RX_WATERMARK_GET:
		*((uint32_t *)arg) = context->rx_watermark;
		break;
	case SEDI_SPI_IOCTL_ABORT:
		/* Stop the controller, mask interrupts and DMA, clear busy */
		lld_spi_enable(context->base, false);
		lld_spi_config_interrupt(context->base, REG_INT_NONE);
#ifdef SEDI_SPI_USE_DMA
		lld_spi_dma_enable(context->base, false);
#endif
		context->status.busy = 0;
		break;
	case SEDI_SPI_IOCTL_BUFFER_SETS:
		context->is_cs_continuous = (bool)arg;
		break;
	case SEDI_SPI_IOCTL_LINE_MODE:
		lld_spi_set_line_mode(context->base, (spi_line_mode_t)arg);
		break;
	default:
		break;
	}

	return SEDI_DRIVER_OK;
}
728 
729 #ifdef SEDI_SPI_USE_DMA
730 static void callback_dma_transfer(const sedi_dma_t dma, const int chan,
731 				  const int event, void *param);
732 
/* Initialize, power up and fully configure one DMA channel for an SPI
 * transfer (direction, transfer widths, handshake id/polarity). burst is
 * ignored: a burst length of 1 is always programmed so any data size can
 * complete. src/dst/len are recorded by the caller via
 * sedi_dma_start_transfer(); they are not used here. Returns 0 on
 * success, SEDI_DRIVER_ERROR via DBG_CHECK on any sub-call failure.
 */
static int config_and_enable_dma_channel(sedi_spi_t spi_dev, int dma,
					 int handshake, int chan, int width,
					 int burst, uint32_t src, uint32_t dst,
					 uint32_t len, int is_tx)
{
	int ret;
	int dma_dir;
	int dma_per_dir;
	dma_transfer_width_t wid = DMA_TRANS_WIDTH_8;

	PARAM_UNUSED(
	    burst); /* Set burst to 1 to finish transfer all data size */

	/* Direction and peripheral handshake side depend on TX vs RX */
	if (is_tx) {
		dma_dir = DMA_MEMORY_TO_PERIPHERAL;
		dma_per_dir = DMA_HS_PER_TX;
	} else {
		dma_dir = DMA_PERIPHERAL_TO_MEMORY;
		dma_per_dir = DMA_HS_PER_RX;
	}

	/* Map element width in bytes to the DMA width enum; width 0 (or any
	 * other value) keeps the 8-bit default.
	 */
	switch (width) {
	case 1:
		wid = DMA_TRANS_WIDTH_8;
		break;
	case 2:
		wid = DMA_TRANS_WIDTH_16;
		break;
	case 4:
		wid = DMA_TRANS_WIDTH_32;
		break;
	default:
		break;
	}

	/* The SPI device id is passed through as the callback parameter */
	ret = sedi_dma_init(dma, chan, callback_dma_transfer, (void *)spi_dev);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_set_power(dma, chan, SEDI_POWER_FULL);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_BURST_LENGTH,
			       DMA_BURST_TRANS_LENGTH_1);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_SR_TRANS_WIDTH, wid);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_DT_TRANS_WIDTH, wid);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_HS_DEVICE_ID,
			       handshake);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_HS_POLARITY,
			       DMA_HS_POLARITY_HIGH);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_DIRECTION, dma_dir);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_HS_DEVICE_ID_PER_DIR,
			       dma_per_dir);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);


	return 0;
}
802 
callback_dma_transfer(const sedi_dma_t dma,const int chan,const int event,void * param)803 static void callback_dma_transfer(const sedi_dma_t dma, const int chan,
804 				  const int event, void *param)
805 {
806 	sedi_spi_t spi_device = (sedi_spi_t)param;
807 
808 	struct spi_context *context = &spi_contexts[spi_device];
809 	uint32_t len = SEDI_DMA_PERIPH_MAX_SIZE;
810 
811 	/* release the dma resource */
812 	sedi_dma_set_power(dma, chan, SEDI_POWER_OFF);
813 	sedi_dma_uninit(dma, chan);
814 
815 	if (event != SEDI_DMA_EVENT_TRANSFER_DONE) {
816 		if (context->cb_event) {
817 			context->cb_event(SEDI_SPI_EVENT_DATA_LOST,
818 						  context->cb_param);
819 		}
820 
821 		goto f_out;
822 	}
823 
824 	/* See tx or rx finished */
825 	if (chan == context->tx_channel) {
826 		context->dma_tx_finished = true;
827 		context->data_tx_idx = context->tx_data_len;
828 		/* Recover LSB reverse, DMA mode tx buff pointer not changed */
829 		if (context->is_lsb == true) {
830 			spi_bit_reverse(context->data_tx, context->tx_data_len,
831 					context->frame_size);
832 			sedi_core_clean_dcache_by_addr(
833 				(uint32_t *)(context->data_tx),
834 				context->tx_data_len);
835 		}
836 		/* Waiting for TX FIFO empty */
837 		SEDI_SPI_POLL_WAIT(lld_spi_is_busy(context->base));
838 	} else if (chan == context->rx_channel) {
839 		context->dma_rx_finished = true;
840 		context->data_rx_idx = context->rx_data_len;
841 		/* If finished Rx, and need to do bit convert */
842 		if (context->is_lsb == true) {
843 #ifndef SEDI_CONFIG_ARCH_X86
844 			/* Invalidate cache */
845 			sedi_core_inv_dcache_by_addr(
846 				(uint32_t *)(context->data_rx),
847 				context->rx_data_len);
848 #endif
849 			spi_bit_reverse(context->data_rx, context->rx_data_len,
850 					context->frame_size);
851 			sedi_core_clean_dcache_by_addr(
852 				(uint32_t *)(context->data_rx),
853 				context->rx_data_len);
854 		}
855 	}
856 
857 	if ((context->dma_tx_finished == false) ||
858 		(context->dma_rx_finished == false)) {
859 		return;
860 	}
861 
862 	/* If need to start another DMA transfer */
863 	context->dma_idx -= 1;
864 	if (context->dma_idx > 0) {
865 		if (context->dma_idx == 1) {
866 			len = context->last_dma_counts;
867 		}
868 		/* According to different transfer mode, do different fill or receive */
869 		if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY)) {
870 			context->data_tx += SEDI_DMA_PERIPH_MAX_SIZE;
871 			context->dma_tx_finished = false;
872 			/* start dma first */
873 			config_and_enable_dma_channel(spi_device, context->tx_dma,
874 						      context->dma_handshake, context->tx_channel,
875 						      0, 1, (uint32_t)(context->data_tx),
876 						      lld_spi_dr_address(context->base), len, true);
877 			sedi_dma_start_transfer(context->tx_dma, context->tx_channel,
878 						(uint32_t)(context->data_tx),
879 						lld_spi_dr_address(context->base), len);
880 
881 		} else if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) {
882 			context->data_rx += SEDI_DMA_PERIPH_MAX_SIZE;
883 			context->dma_rx_finished = false;
884 			/* Configure rx channel */
885 			context->base->ctrlr1 = len / context->frame_size - 1;
886 			sedi_dma_start_transfer(context->rx_dma, context->rx_channel,
887 						lld_spi_dr_address(context->base),
888 						(uint32_t)(context->data_rx), len);
889 			config_and_enable_dma_channel(spi_device, context->rx_dma,
890 						      context->rx_handshake, context->rx_channel, 0,
891 						      1, lld_spi_dr_address(context->base),
892 						      (uint32_t)(context->data_rx), len, false);
893 
894 		} else {
895 			context->data_tx += SEDI_DMA_PERIPH_MAX_SIZE;
896 			context->data_rx += SEDI_DMA_PERIPH_MAX_SIZE;
897 			context->dma_tx_finished = false;
898 			context->dma_rx_finished = false;
899 			/* Enable both channel to do transfer */
900 			config_and_enable_dma_channel(spi_device, context->tx_dma,
901 						      context->dma_handshake, context->tx_channel,
902 						      0, 1, (uint32_t)(context->data_tx),
903 						      lld_spi_dr_address(context->base), len, true);
904 			config_and_enable_dma_channel(spi_device, context->rx_dma,
905 						      context->rx_handshake, context->rx_channel, 0,
906 						      1, lld_spi_dr_address(context->base),
907 						      (uint32_t)(context->data_rx), len, false);
908 			/* Enable both channel and start rx firstly to do transfer */
909 			sedi_dma_start_transfer(context->rx_dma, context->rx_channel,
910 						lld_spi_dr_address(context->base),
911 						(uint32_t)(context->data_rx), len);
912 			sedi_dma_start_transfer(context->tx_dma, context->tx_channel,
913 						(uint32_t)(context->data_tx),
914 						lld_spi_dr_address(context->base), len);
915 		}
916 
917 		/* Return to start another transfer */
918 		return;
919 
920 	}
921 
922 	/* All tx and rx finished */
923 	if (context->cb_event) {
924 		context->cb_event(SEDI_SPI_EVENT_COMPLETE, context->cb_param);
925 	}
926 
927 f_out:
928 	/* clear spi busy status and disable spi dma*/
929 	context->status.busy = 0;
930 	lld_spi_config_interrupt(context->base, REG_INT_NONE);
931 	lld_spi_enable(context->base, false);
932 	lld_spi_dma_enable(context->base, false);
933 }
934 #endif
935 
/*
 * Start a DMA-based SPI transfer on @spi_device.
 *
 * @param spi_device   controller instance index (< SEDI_SPI_NUM)
 * @param tx_dma       DMA engine instance used for TX
 * @param tx_dma_chan  DMA channel used for TX
 * @param data_out     TX buffer, or NULL for receive-only transfers
 * @param rx_dma       DMA engine instance used for RX
 * @param rx_dma_chan  DMA channel used for RX
 * @param data_in      RX buffer, or NULL for transmit-only transfers
 * @param num          transfer size in bytes; must be a multiple of the
 *                     configured frame size
 * @return SEDI_DRIVER_OK on success, SEDI_DRIVER_ERROR_BUSY if a transfer
 *         is in flight, SEDI_DRIVER_ERROR_PARAMETER on bad arguments
 *
 * Completion is reported asynchronously through the registered callback
 * (SEDI_SPI_EVENT_COMPLETE / SEDI_SPI_EVENT_DATA_LOST); this function only
 * programs and starts the DMA channels. Without SEDI_SPI_USE_DMA the body
 * is compiled out and the call is a no-op returning SEDI_DRIVER_OK.
 */
int32_t sedi_spi_dma_transfer(IN sedi_spi_t spi_device, IN uint32_t tx_dma,
			      IN uint32_t tx_dma_chan, IN uint8_t *data_out,
			      IN uint32_t rx_dma, IN uint32_t rx_dma_chan,
			      OUT uint8_t *data_in, IN uint32_t num)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
#ifdef SEDI_SPI_USE_DMA
	struct spi_context *context = &spi_contexts[spi_device];
	sedi_spi_regs_t *spi = context->base;
	int tx_handshake = context->dma_handshake;
	int rx_handshake = context->rx_handshake;
	int width = context->frame_size;
	int burst = 1;
	uint32_t len = num;

	DBG_CHECK(((num % context->frame_size) == 0),
		  SEDI_DRIVER_ERROR_PARAMETER);

	if (context->status.busy) {
		return SEDI_DRIVER_ERROR_BUSY;
	}

	context->status.busy = 1U;

	/* DMA request thresholds: request TX service while the FIFO has room,
	 * request RX service as soon as one entry is available. */
	context->base->dmatdlr = SPI_FIFO_DEPTH - 1;
	context->base->dmardlr = 0;
	context->tx_dma = tx_dma;
	context->rx_dma = rx_dma;
	context->tx_channel = tx_dma_chan;
	context->rx_channel = rx_dma_chan;
	context->dma_tx_finished = false;
	context->dma_rx_finished = false;
	context->tx_data_len = num;
	context->rx_data_len = num;
	context->data_tx = (uint8_t *)data_out;
	context->data_rx = data_in;
	/* DMA BLOCK TS only 4096, for large data more than 4K, use multiple transfer  */
	context->last_dma_counts = (num & (SEDI_DMA_PERIPH_MAX_SIZE - 1));
	if (context->last_dma_counts == 0) {
		context->dma_cycles = num >> SEDI_DMA_PERIPH_MAX_SIZE_SHIFT;
		/* Exact multiple: the final chunk is a full-size block */
		context->last_dma_counts = SEDI_DMA_PERIPH_MAX_SIZE;
	} else {
		context->dma_cycles = (num >> SEDI_DMA_PERIPH_MAX_SIZE_SHIFT) + 1;
	}
	/* dma_idx counts down in the DMA-done callback; when it reaches 1 the
	 * callback switches to last_dma_counts for the final chunk. */
	context->dma_idx = context->dma_cycles;

	if (context->dma_cycles > 1) {
		len = SEDI_DMA_PERIPH_MAX_SIZE;
	}
#ifdef SPI_DW_2_0
	/* Clear the bit field */
	SEDI_PREG_RBF_SET(SPI, TXFTLR, TFT, 0, &context->base->txftlr);
#endif

	/* Decide the transfer mode, send, receive or both */
	lld_spi_set_transfer_mode(spi_device, data_out, data_in);

	/* If need to bit reverse tx buffer (FIFO shifts MSB first) */
	if (context->is_lsb == true) {
		if (context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) {
			spi_bit_reverse(context->data_tx, context->tx_data_len,
					context->frame_size);
			/* Clean the cache for DMA transfer */
			sedi_core_clean_dcache_by_addr(
					(uint32_t *)(context->data_tx),
					context->tx_data_len);
		}
#ifdef SEDI_CONFIG_ARCH_X86
		if (context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY)) {
			sedi_core_inv_clean_dcache_by_addr(
					(uint32_t *)(context->data_rx),
					context->rx_data_len);
		}
#endif
	}

	/* According to different transfer mode, do different fill or receive */
	if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY)) {
		/* start dma first */
		config_and_enable_dma_channel(
		    spi_device, tx_dma, tx_handshake, tx_dma_chan, width, burst,
		    (uint32_t)data_out, lld_spi_dr_address(context->base), len,
		    true);
		/* No RX leg: mark it done and park the channel id */
		context->dma_rx_finished = true;
		context->rx_channel = 0xFF;
#ifdef SPI_DW_2_0
		dw_spi_set_start_condition(context, len);
#endif
	} else if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) {
		/* Send dummy data first to start the clock */
		if (context->is_quad == false) {
			lld_spi_fill_fifo(spi_device, context->frame_size, NULL,
					context->frame_size);
		}

		/* Configure rx channel */
		config_and_enable_dma_channel(spi_device, rx_dma, rx_handshake,
					      rx_dma_chan, width, burst,
					      lld_spi_dr_address(context->base),
					      (uint32_t)data_in, len, false);
		/* Set NDF bits for receive only mode.
		 * NOTE(review): len <= SEDI_DMA_PERIPH_MAX_SIZE here, so this
		 * check cannot fail; if it ever could, the early return would
		 * leave status.busy set — confirm DBG_CHECK semantics. */
		DBG_CHECK((len <= SPI_RECEIVE_MODE_MAX_SIZE),
			  SEDI_DRIVER_ERROR_PARAMETER);
		context->base->ctrlr1 = len / context->frame_size - 1;
		/* No TX leg: mark it done and park the channel id */
		context->dma_tx_finished = true;
		context->tx_channel = 0xFF;
	} else {
		/* Enable both channel to do transfer */
		config_and_enable_dma_channel(
		    spi_device, tx_dma, tx_handshake, tx_dma_chan, width, burst,
		    (uint32_t)data_out, lld_spi_dr_address(context->base), len,
		    true);
		config_and_enable_dma_channel(spi_device, rx_dma, rx_handshake,
					      rx_dma_chan, width, burst,
					      lld_spi_dr_address(context->base),
					      (uint32_t)data_in, len, false);
#ifdef SPI_DW_2_0
		dw_spi_set_start_condition(context, len);
#endif
	}

	/* Only error interrupts are needed; data movement is handled by DMA */
	lld_spi_config_interrupt(context->base, REG_INT_ERROR);

	lld_spi_dma_enable(context->base, true);

	lld_spi_enable(context->base, true);

	if ((context->is_quad) && (context->quad_config)) {
		const sedi_spi_enhanced_config_t *config = context->quad_config;

		/* Instruction need 1 entry */
		SEDI_PREG_SET(SPI, DR0, *(config->inst_buf), &spi->dr0);
		SEDI_PREG_SET(SPI, DR0, *((uint32_t *)(config->addr_buf)), &spi->dr0);
		/* After fill in addr and instruction, no need to keep quad state,
		just transfer data as standard SPI */
		context->is_quad = false;
		context->quad_config = NULL;
	}

	if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY)) {
		sedi_dma_start_transfer(tx_dma, tx_dma_chan, (uint32_t)data_out,
					lld_spi_dr_address(context->base), len);
	} else if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) {
		sedi_dma_start_transfer(rx_dma, rx_dma_chan, lld_spi_dr_address(context->base),
					(uint32_t)data_in, len);
	} else {
		/* the SPI transaction may interrupted by some other events between Tx/Rx dma
		 * enable, which probably lead to rx fifo overflow, start rx channel firstly.
		 */
		sedi_dma_start_transfer(rx_dma, rx_dma_chan, lld_spi_dr_address(context->base),
					(uint32_t)data_in, len);
		sedi_dma_start_transfer(tx_dma, tx_dma_chan, (uint32_t)data_out,
			lld_spi_dr_address(context->base), len);
	}

#endif
	return SEDI_DRIVER_OK;
}
1094 
sedi_spi_poll_transfer(IN sedi_spi_t spi_device,IN uint8_t * data_out,OUT uint8_t * data_in,IN uint32_t num)1095 int32_t sedi_spi_poll_transfer(IN sedi_spi_t spi_device, IN uint8_t *data_out,
1096 			       OUT uint8_t *data_in, IN uint32_t num)
1097 {
1098 	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
1099 
1100 	uint32_t tx_num = num, rx_num = num, fill_num = 0, receive_num = 0;
1101 	struct spi_context *context = &spi_contexts[spi_device];
1102 
1103 	DBG_CHECK(((num % context->frame_size) == 0),
1104 		  SEDI_DRIVER_ERROR_PARAMETER);
1105 
1106 	if (context->status.busy) {
1107 		return SEDI_DRIVER_ERROR_BUSY;
1108 	}
1109 
1110 	context->status.busy = 1U;
1111 	context->data_tx = (void *)data_out;
1112 	context->data_rx = (void *)data_in;
1113 	context->tx_data_len = num;
1114 	context->rx_data_len = num;
1115 	context->data_tx_idx = 0;
1116 	context->data_rx_idx = 0;
1117 #ifdef SPI_DW_2_0
1118 	/* Clear the bit field */
1119 	SEDI_PREG_RBF_SET(SPI, TXFTLR, TFT, 0, &context->base->txftlr);
1120 #endif
1121 
1122 	/* Decide the transfer mode, send, receive or both */
1123 	lld_spi_set_transfer_mode(spi_device, data_out, data_in);
1124 
1125 	/* First convert tx buffer */
1126 	if ((context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) &&
1127 	    (context->is_lsb == true)) {
1128 		spi_bit_reverse(context->data_tx, context->tx_data_len,
1129 				context->frame_size);
1130 	}
1131 
1132 	/* According to different transfer mode, do different fill or receive */
1133 	if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY)) {
1134 		rx_num = 0;
1135 	} else if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) {
1136 		tx_num = context->frame_size; /* Shall send at least one data
1137 						 for receive */
1138 		DBG_CHECK((num <= SPI_RECEIVE_MODE_MAX_SIZE),
1139 			  SEDI_DRIVER_ERROR_PARAMETER);
1140 		context->base->ctrlr1 = num / context->frame_size - 1;
1141 	}
1142 
1143 	lld_spi_enable(context->base, true);
1144 
1145 	/* First send some data in both transfer mode */
1146 	fill_num = lld_spi_fill_fifo(spi_device, context->frame_size, data_out,
1147 				     tx_num);
1148 	/* Update buffer and number */
1149 	if (data_out) {
1150 		data_out += fill_num;
1151 	}
1152 	tx_num -= fill_num;
1153 
1154 	while ((tx_num > 0) || (rx_num > 0)) {
1155 		if (tx_num > 0) {
1156 			/* First send some data */
1157 			fill_num = lld_spi_fill_fifo(
1158 			    spi_device, context->frame_size, data_out, tx_num);
1159 			/* Update buffer and number */
1160 			data_out += fill_num;
1161 			tx_num -= fill_num;
1162 		}
1163 
1164 		if (rx_num > 0) {
1165 			/* Receive some data */
1166 			receive_num = lld_spi_receive_fifo(context->base,
1167 							   context->frame_size,
1168 							   data_in, rx_num);
1169 			data_in += receive_num;
1170 			rx_num -= receive_num;
1171 		}
1172 	}
1173 
1174 	/* Waiting for SPI idle */
1175 	SEDI_SPI_POLL_WAIT(lld_spi_is_busy(context->base));
1176 	lld_spi_enable(context->base, false);
1177 
1178 	context->status.busy = 0U;
1179 	context->data_tx_idx = num;
1180 	context->data_rx_idx = num;
1181 
1182 	/* If has rx buffer and need bit reverse */
1183 	if ((context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY)) &&
1184 	    (context->is_lsb == true)) {
1185 		spi_bit_reverse(context->data_rx, context->rx_data_len,
1186 				context->frame_size);
1187 	}
1188 
1189 	/* If need to recover tx buffer */
1190 	if ((context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) &&
1191 	    (context->is_lsb == true)) {
1192 		spi_bit_reverse(context->data_tx, context->tx_data_len,
1193 				context->frame_size);
1194 	}
1195 	return SEDI_DRIVER_OK;
1196 }
1197 
sedi_spi_transfer(IN sedi_spi_t spi_device,IN uint8_t * data_out,OUT uint8_t * data_in,IN uint32_t num)1198 int32_t sedi_spi_transfer(IN sedi_spi_t spi_device, IN uint8_t *data_out,
1199 			  OUT uint8_t *data_in, IN uint32_t num)
1200 {
1201 	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
1202 
1203 	struct spi_context *context = &spi_contexts[spi_device];
1204 	sedi_spi_regs_t *spi = context->base;
1205 	uint32_t send_count = num;
1206 
1207 	DBG_CHECK(((num % context->frame_size) == 0),
1208 		  SEDI_DRIVER_ERROR_PARAMETER);
1209 
1210 	if (context->status.busy) {
1211 		return SEDI_DRIVER_ERROR_BUSY;
1212 	}
1213 
1214 	/* For transfer size less than watermark */
1215 	if (num < context->rx_watermark * context->frame_size) {
1216 		/* Only shall reset the receive watermark to finish trigger
1217 		 * interrupt */
1218 		lld_spi_set_rx_watermark(context->base,
1219 					 num / context->frame_size);
1220 	} else {
1221 		lld_spi_set_rx_watermark(context->base,
1222 					 (SPI_FIFO_DEPTH / 2 - 1) *
1223 					     context->frame_size);
1224 	}
1225 
1226 	lld_spi_set_transfer_mode(spi_device, data_out, data_in);
1227 
1228 	/* For IRQ mode only, if use multiple buffers, cannot change mode in
1229 	 * transfer */
1230 	if ((context->is_cs_continuous == true) && (!context->is_quad)) {
1231 		SEDI_PREG_RBFV_SET(SPI, CTRLR0, TMOD, TX_AND_RX, &spi->ctrlr0);
1232 		context->transfer_mode = SEDI_RBFV(SPI, CTRLR0, TMOD, TX_AND_RX);
1233 	}
1234 
1235 	context->status.busy = 1U;
1236 
1237 	context->data_tx = (void *)data_out;
1238 	context->data_rx = (void *)data_in;
1239 	context->tx_data_len = num;
1240 	context->rx_data_len = num;
1241 	context->data_tx_idx = 0;
1242 	context->data_rx_idx = 0;
1243 #ifdef SPI_DW_2_0
1244 	/* Clear the bit field */
1245 	SEDI_PREG_RBF_SET(SPI, TXFTLR, TFT, 0, &spi->txftlr);
1246 #endif
1247 
1248 	/* First convert tx buffer */
1249 	if ((context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) &&
1250 	    (context->is_lsb == true)) {
1251 		spi_bit_reverse(context->data_tx, context->tx_data_len,
1252 				context->frame_size);
1253 	}
1254 
1255 	/* According to different transfer mode, do different fill or receive */
1256 	if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY)) {
1257 		context->data_rx_idx = num;
1258 	} else if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) {
1259 		send_count = context->frame_size;
1260 		DBG_CHECK((num <= SPI_RECEIVE_MODE_MAX_SIZE),
1261 			  SEDI_DRIVER_ERROR_PARAMETER);
1262 		context->base->ctrlr1 = num / context->frame_size - 1;
1263 		/* Write into FIFO needs to enable SPI first */
1264 		lld_spi_enable(context->base, true);
1265 		lld_spi_fill_fifo(spi_device, context->frame_size, data_out,
1266 				  send_count);
1267 		context->data_tx_idx = num;
1268 	}
1269 
1270 #ifdef SPI_DW_2_0
1271 	if (context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) {
1272 		dw_spi_set_start_condition(context, num);
1273 	}
1274 #endif
1275 
1276 	lld_spi_enable(context->base, true);
1277 
1278 	lld_spi_config_interrupt(context->base,
1279 				 REG_INT_TX | REG_INT_RX | REG_INT_ERROR);
1280 
1281 	return SEDI_DRIVER_OK;
1282 }
1283 
spi_enhanced_config(IN sedi_spi_t spi_device,IN sedi_spi_enhanced_config_t * config)1284 static int32_t spi_enhanced_config(IN sedi_spi_t spi_device,
1285 			    IN sedi_spi_enhanced_config_t *config)
1286 {
1287 	struct spi_context *context = &spi_contexts[spi_device];
1288 	sedi_spi_regs_t *spi = context->base;
1289 
1290 	if (SEDI_PREG_RBFV_IS_SET(SPI, CTRLR0, SPI_FRF, STD_SPI_FRF, &spi->ctrlr0)) {
1291 		/* single mode no need to configure */
1292 		return 0;
1293 	}
1294 
1295 	/* inst is must, address is option */
1296 	if ((config->inst_buf == NULL) || (config->inst_len == 0)) {
1297 		return SEDI_DRIVER_ERROR_PARAMETER;
1298 	}
1299 
1300 	context->is_quad = true;
1301 	context->quad_config = config;
1302 
1303 	/* Disable spi first to set registers */
1304 	lld_spi_enable(spi, false);
1305 	/* add SPI_DW_2_0 here as sedi_spi_reg.h osxml has no SPI_CTRL0*/
1306 #ifdef SPI_DW_2_0
1307 	/* Config SPI_CTRL0 register */
1308 	SEDI_PREG_RBF_SET(SPI, SPI_CTRLR0, ADDR_L, config->addr_len, &spi->spi_ctrl0);
1309 	SEDI_PREG_RBF_SET(SPI, SPI_CTRLR0, INST_L, config->inst_len, &spi->spi_ctrl0);
1310 	SEDI_PREG_RBF_SET(SPI, SPI_CTRLR0, WAIT_CYCLES, config->dummy_cycles, &spi->spi_ctrl0);
1311 #endif
1312 
1313 	return 0;
1314 }
1315 
sedi_spi_enhanced_transfer(IN sedi_spi_t spi_device,IN sedi_spi_enhanced_config_t * config)1316 int32_t sedi_spi_enhanced_transfer(IN sedi_spi_t spi_device,
1317 				   IN sedi_spi_enhanced_config_t *config)
1318 {
1319 	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
1320 
1321 	/*  Handle enhanced spi config */
1322 	spi_enhanced_config(spi_device, config);
1323 
1324 	/* start transfer */
1325 	return sedi_spi_transfer(spi_device, config->tx_buf, config->rx_buf,
1326 				config->data_len);
1327 }
1328 
sedi_spi_dma_enhanced_transfer(IN sedi_spi_t spi_device,IN uint32_t tx_dma,IN uint32_t tx_dma_chan,IN uint32_t rx_dma,IN uint32_t rx_dma_chan,IN sedi_spi_enhanced_config_t * config)1329 int32_t sedi_spi_dma_enhanced_transfer(IN sedi_spi_t spi_device, IN uint32_t tx_dma,
1330 			      IN uint32_t tx_dma_chan, IN uint32_t rx_dma,
1331 			      IN uint32_t rx_dma_chan,
1332 			      IN sedi_spi_enhanced_config_t *config)
1333 {
1334 	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
1335 
1336 	/*  Handle enhanced spi config */
1337 	spi_enhanced_config(spi_device, config);
1338 
1339 	/* start transfer */
1340 	return sedi_spi_dma_transfer(spi_device, tx_dma, tx_dma_chan,
1341 				    config->tx_buf, rx_dma, rx_dma_chan,
1342 				    config->rx_buf, config->data_len);
1343 }
1344 
sedi_spi_update_tx_buf(IN sedi_spi_t spi_device,IN uint8_t * tx_buf,IN uint32_t len)1345 int32_t sedi_spi_update_tx_buf(IN sedi_spi_t spi_device, IN uint8_t *tx_buf,
1346 			       IN uint32_t len)
1347 {
1348 	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
1349 
1350 	struct spi_context *context = &spi_contexts[spi_device];
1351 
1352 	DBG_CHECK(((len % context->frame_size) == 0),
1353 		  SEDI_DRIVER_ERROR_PARAMETER);
1354 
1355 	/* This function can only used in continuous mode */
1356 	DBG_CHECK((context->is_cs_continuous == true),
1357 		  SEDI_DRIVER_ERROR_UNSUPPORTED);
1358 
1359 	if (len == 0) {
1360 		return SEDI_DRIVER_ERROR_PARAMETER;
1361 	}
1362 	/* As continuous mode all use both transfer mode, rx also need to update
1363 	 * length */
1364 	context->data_tx = (void *)tx_buf;
1365 	context->tx_data_len += len;
1366 
1367 	return SEDI_DRIVER_OK;
1368 }
1369 
sedi_spi_update_rx_buf(IN sedi_spi_t spi_device,OUT uint8_t * rx_buf,IN uint32_t len)1370 int32_t sedi_spi_update_rx_buf(IN sedi_spi_t spi_device, OUT uint8_t *rx_buf,
1371 			       IN uint32_t len)
1372 {
1373 	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
1374 
1375 	struct spi_context *context = &spi_contexts[spi_device];
1376 
1377 	DBG_CHECK(((len % context->frame_size) == 0),
1378 		  SEDI_DRIVER_ERROR_PARAMETER);
1379 
1380 	/* This function can only used in continuous mode */
1381 	DBG_CHECK((context->is_cs_continuous == true),
1382 		  SEDI_DRIVER_ERROR_UNSUPPORTED);
1383 
1384 	if (len == 0) {
1385 		return SEDI_DRIVER_ERROR_PARAMETER;
1386 	}
1387 
1388 	/* As continuous mode all use both transfer mode, rx also need to update
1389 	 * length */
1390 	context->data_rx = (void *)rx_buf;
1391 	context->rx_data_len += len;
1392 
1393 	return SEDI_DRIVER_OK;
1394 }
1395 
/*
 * SPI interrupt service routine for IRQ-mode transfers started by
 * sedi_spi_transfer(): tops up the TX FIFO, drains the RX FIFO, adapts the
 * RX watermark for the tail of the transfer, and signals completion or
 * data-lost through the registered callback.
 *
 * Fixes vs. previous revision:
 *  - The completion test crossed its comparisons (data_rx_idx vs tx_data_len
 *    and data_tx_idx vs rx_data_len). That only worked because both lengths
 *    start equal; once continuous-mode sedi_spi_update_tx_buf()/_rx_buf()
 *    grow the two lengths independently, the crossed form either never
 *    completes or completes early. Each index is now compared against its
 *    own length.
 *  - `event` is initialized so it can never be read undefined.
 */
void spi_isr(IN sedi_spi_t spi_device)
{
	struct spi_context *context = &spi_contexts[spi_device];
	uint32_t intr_stat;
	int error = false;
	int end = false;
	int event = SEDI_SPI_EVENT_COMPLETE;
	int idx;
	uint32_t temp, rx_len;

	intr_stat = lld_spi_interrupt_clear(context->base);

	/* To reduce the interrupt times, send/receive as many as possible */
	if (intr_stat & (REG_INT_RX | REG_INT_TX)) {
		while (context->data_tx_idx < context->tx_data_len) {
			temp = context->tx_data_len - context->data_tx_idx;
			idx = lld_spi_fill_fifo(spi_device, context->frame_size,
						context->data_tx, temp);

			context->data_tx_idx += idx;
			if (context->data_tx != NULL) {
				context->data_tx += idx;
			}

			if ((context->data_tx_idx == context->tx_data_len) &&
			    (context->cb_event)) {
				context->cb_event(SEDI_SPI_EVENT_TX_FINISHED,
						  context->cb_param);
			}

			if (idx < temp) {
				/* FIFO full; wait for the next TX interrupt */
				break;
			}
		}

		while (context->data_rx_idx < context->rx_data_len) {
			rx_len = context->rx_data_len - context->data_rx_idx;
			idx = lld_spi_receive_fifo(context->base,
						   context->frame_size,
						   context->data_rx, rx_len);

			context->data_rx_idx += idx;
			if (context->data_rx != NULL) {
				context->data_rx += idx;
			}

			/* Lower the watermark for the final chunk so a
			 * sub-watermark remainder still raises an RX interrupt */
			if ((context->rx_data_len - context->data_rx_idx <
			     context->frame_size * context->rx_watermark) &&
			    (context->rx_data_len != context->data_rx_idx)) {
				temp = (context->rx_data_len -
					context->data_rx_idx) /
				       context->frame_size;
				lld_spi_set_rx_watermark(context->base, temp);
				context->rx_watermark = temp;
			}

			if ((context->data_rx_idx == context->rx_data_len) &&
			    (context->cb_event)) {
				context->cb_event(SEDI_SPI_EVENT_RX_FINISHED,
						  context->cb_param);
			}

			if (idx < rx_len) {
				/* FIFO drained; wait for more RX data */
				break;
			}
		}
	}

	/* Transfer is done when each index has reached its own length */
	if ((context->data_rx_idx == context->rx_data_len) &&
	    (context->data_tx_idx == context->tx_data_len)) {
		end = true;
		event = SEDI_SPI_EVENT_COMPLETE;
		/* Wait for Data in FIFO send out while not continuous */
		SEDI_SPI_POLL_WAIT(lld_spi_is_busy(context->base));

		/* If need to reverse rx buffer back to the caller's LSB order */
		if ((context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY)) &&
		    (context->is_lsb == true)) {
			context->data_rx -= context->data_rx_idx;
			spi_bit_reverse(context->data_rx, context->rx_data_len,
					context->frame_size);
		}
		/* If need to recover tx buffer to its original order */
		if ((context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) &&
		    (context->is_lsb == true)) {
			context->data_tx -= context->data_tx_idx;
			spi_bit_reverse(context->data_tx, context->tx_data_len,
					context->frame_size);
		}
	}

	if (intr_stat & REG_INT_ERROR) {
		error = true;
		event = SEDI_SPI_EVENT_DATA_LOST;
		context->status.data_lost = true;
	}

	if ((error || end) && (context->status.busy != 0)) {
		context->status.busy = 0;
		lld_spi_config_interrupt(context->base, REG_INT_NONE);
		lld_spi_enable(context->base, false);

		if (context->cb_event) {
			context->cb_event(event, context->cb_param);
		}
	}
}
1507