1 /*
2 * Copyright (c) 2023 Intel Corporation
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
#include <stddef.h>

#include "sedi_driver_pm.h"
#include "sedi_driver_core.h"
#include "sedi_driver_spi.h"
#include "sedi_driver_dma.h"
#include "sedi_spi_regs.h"
12
13 #define SEDI_SPI_DRV_VERSION SEDI_DRIVER_VERSION_MAJOR_MINOR(1, 0)
14
15 #define REG_INT_ERROR \
16 (SEDI_RBFVM(SPI, IMR, TXOIM, UNMASKED) | \
17 SEDI_RBFVM(SPI, IMR, RXUIM, UNMASKED) | \
18 SEDI_RBFVM(SPI, IMR, RXOIM, UNMASKED))
19 #define REG_INT_TX \
20 (SEDI_RBFVM(SPI, IMR, TXEIM, UNMASKED))
21 #define REG_INT_RX \
22 (SEDI_RBFVM(SPI, IMR, RXFIM, UNMASKED))
23 #define REG_INT_NONE (0)
24
25 #define SPI_FRAME_SIZE_1_BYTE (1)
26 #define SPI_FRAME_SIZE_2_BYTES (2)
27 #define SPI_RECEIVE_MODE_MAX_SIZE (65536)
28 #define SPI_DMA_MAX_SIZE (4096)
29 #define SPI_DMA_MAX_SIZE_SHIFT (12)
30 #define SSI_IC_FREQ (sedi_pm_get_lbw_clock())
31
32 #define SPI_BITWIDTH_4BITS (SEDI_RBFV(SPI, CTRLR0, DFS, FRAME_04BITS) + 1)
33 #define SPI_BITWIDTH_8BITS (SEDI_RBFV(SPI, CTRLR0, DFS, FRAME_08BITS) + 1)
34 #define SPI_BITWIDTH_16BITS (SEDI_RBFV(SPI, CTRLR0, DFS, FRAME_16BITS) + 1)
35
36 /* Add easy usage for SSI Clock Divider */
37 SEDI_RBFV_DEFINE(SPI, BAUDR, SCKDV, MIN_PRESCALE, 0x2);
38 SEDI_RBFV_DEFINE(SPI, BAUDR, SCKDV, DEFAULT_PRESCALE, 0x14);
39
40 #ifdef SPI_DW_2_0
41 /* ********* SPI SPI_CTRLR0 ***********
42 * SPI Control Register is valid only when SSI_SPI_MODE is either set to
43 * "Dual" or "Quad" or "Octal" mode
44 */
45 SEDI_REG_DEFINE(SPI, SPI_CTRLR0, 0xf4, RW, (uint32_t)0x7fb3f, (uint32_t)0x200);
46 SEDI_RBF_DEFINE(SPI, SPI_CTRLR0, ADDR_L, 2, 4, RW, (uint32_t)0x0);
47 SEDI_RBF_DEFINE(SPI, SPI_CTRLR0, INST_L, 8, 2, RW, (uint32_t)0x2);
48 SEDI_RBF_DEFINE(SPI, SPI_CTRLR0, WAIT_CYCLES, 11, 5, RW, (uint32_t)0x0);
49 /* Notice: there are several specific registers offset of RBF for SPI_DW_2_0
50 * List TFT as a example
51 */
52 SEDI_RBF_DEFINE(SPI, TXFTLR, TFT, 0, 16, RW, (uint32_t)0x0);
53 #endif
54
/* Per-controller run-time state; one entry per SPI instance */
struct spi_context {
	/* hardware config */
	sedi_spi_regs_t *base;		/* memory-mapped register block */
	uint32_t dma_handshake;		/* DMA hardware handshake ID for TX */
	uint32_t rx_handshake;		/* DMA hardware handshake ID for RX */

	/* sedi required */
	sedi_spi_capabilities_t capability;
	sedi_spi_status_t status;	/* busy/data_lost/mode_fault flags */
	sedi_spi_event_cb_t cb_event;	/* user completion/error callback */
	void *cb_param;			/* opaque argument for cb_event */

	/* ioctl info */
	uint8_t frame_size; /* Frame size in byte */
	uint8_t tx_watermark;	/* TX FIFO threshold, 1-based count */
	uint8_t rx_watermark;	/* RX FIFO threshold, 1-based count */
	uint32_t prescale;	/* BAUDR SCKDV clock divider */
	uint32_t dummy_data;	/* filler sent when no TX buffer given */
	bool is_lsb;		/* true: LSB-first (software bit reversal) */
	bool is_cs_continuous;

	/* transfer info */
	uint8_t transfer_mode;	/* cached CTRLR0.TMOD value */
	uint8_t *data_tx;
	uint8_t *data_rx;
	uint32_t tx_data_len;	/* total TX bytes requested */
	uint32_t rx_data_len;	/* total RX bytes requested */
	uint32_t data_tx_idx;	/* TX bytes completed so far */
	uint32_t data_rx_idx;	/* RX bytes completed so far */

	/* For dma transfer */
	bool dma_tx_finished;
	bool dma_rx_finished;
	uint32_t tx_dma;	/* DMA device used for TX */
	uint32_t rx_dma;	/* DMA device used for RX */
	uint8_t tx_channel;
	uint8_t rx_channel;
	uint32_t dma_cycles; /* For large data DMA transfer */
	uint32_t dma_idx; /* For large data DMA transfer */
	uint32_t last_dma_counts; /* For large data DMA transfer */

	/* For qspi */
	bool is_quad;
	const sedi_spi_enhanced_config_t *quad_config;
};
100
/* Version pair reported by sedi_spi_get_version() */
static const sedi_driver_version_t driver_version = {SEDI_SPI_API_VERSION,
						     SEDI_SPI_DRV_VERSION};

/* Capability table, populated on demand by sedi_spi_get_capabilities() */
static sedi_spi_capabilities_t driver_capabilities[SEDI_SPI_NUM] = {0};
105
/* Static initializer binding instance x to its register base and the
 * TX/RX DMA hardware handshake IDs for that instance.
 */
#define SPI_CONTEXT_INIT(x) \
	{ \
		.base = (sedi_spi_regs_t *)SEDI_IREG_BASE(SPI, x), \
		.dma_handshake = DMA_HWID_SPI##x##_TX, .dummy_data = 0x00, \
		.rx_handshake = DMA_HWID_SPI##x##_RX \
	}

static struct spi_context spi_contexts[SEDI_SPI_NUM] = { SPI_CONTEXT_INIT(0), SPI_CONTEXT_INIT(1) };
114
/* bit_reverse_table[b] is b with its 8 bits mirrored (MSB <-> LSB);
 * used to implement LSB-first transfers in software.
 */
static const uint8_t bit_reverse_table[] = {
	0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0,
	0x30, 0xB0, 0x70, 0xF0, 0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8,
	0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8, 0x04, 0x84, 0x44, 0xC4,
	0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
	0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC,
	0x3C, 0xBC, 0x7C, 0xFC, 0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2,
	0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2, 0x0A, 0x8A, 0x4A, 0xCA,
	0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
	0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6,
	0x36, 0xB6, 0x76, 0xF6, 0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE,
	0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE, 0x01, 0x81, 0x41, 0xC1,
	0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
	0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9,
	0x39, 0xB9, 0x79, 0xF9, 0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5,
	0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5, 0x0D, 0x8D, 0x4D, 0xCD,
	0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
	0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3,
	0x33, 0xB3, 0x73, 0xF3, 0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB,
	0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB, 0x07, 0x87, 0x47, 0xC7,
	0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
	0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF,
	0x3F, 0xBF, 0x7F, 0xFF
};
139
msb_lsb_convert_8bits(uint8_t * val,uint32_t len)140 static void msb_lsb_convert_8bits(uint8_t *val, uint32_t len)
141 {
142 uint32_t i;
143 uint8_t idx;
144
145 for (i = 0; i < len; i++) {
146 idx = val[i];
147 val[i] = bit_reverse_table[idx];
148 }
149 }
150
msb_lsb_convert_16bits(uint16_t * val,uint32_t len)151 static void msb_lsb_convert_16bits(uint16_t *val, uint32_t len)
152 {
153 uint32_t i;
154 uint16_t idx;
155
156 for (i = 0; i < len; i++) {
157 idx = val[i];
158 val[i] = (bit_reverse_table[idx & 0xFF] << 8U) |
159 (bit_reverse_table[(idx & 0xFF00) >> 8U]);
160 }
161 }
162
/* Enable or disable the SSI controller, busy-waiting until SSIENR
 * reflects the requested state. Interrupts are masked first so no
 * stale interrupt fires across the state change.
 */
static inline void lld_spi_enable(sedi_spi_regs_t *spi, bool enable)
{
	uint32_t val = enable ? SEDI_RBFV(SPI, SSIENR, SSI_EN, ENABLED) :
				SEDI_RBFV(SPI, SSIENR, SSI_EN, DISABLE);

	/* Already in the requested state: nothing to do */
	if (SEDI_PREG_RBFV_GET(SPI, SSIENR, SSI_EN, &spi->ssienr) == val) {
		return;
	}
	/* prevent pending interrupt */
	spi->imr = 0;

	SEDI_PREG_RBF_SET(SPI, SSIENR, SSI_EN, val, &spi->ssienr);

	/* Poll until the enable bit change takes effect */
	while (SEDI_PREG_RBFV_GET(SPI, SSIENR, SSI_EN, &spi->ssienr) != val)
		;
}
179
/* Turn the controller's TX and RX DMA request generation on or off */
static inline void lld_spi_dma_enable(sedi_spi_regs_t *spi, bool enable)
{
	if (!enable) {
		SEDI_PREG_RBFV_SET(SPI, DMACR, TDMAE, DISABLE, &spi->dmacr);
		SEDI_PREG_RBFV_SET(SPI, DMACR, RDMAE, DISABLE, &spi->dmacr);
		return;
	}

	SEDI_PREG_RBFV_SET(SPI, DMACR, TDMAE, ENABLED, &spi->dmacr);
	SEDI_PREG_RBFV_SET(SPI, DMACR, RDMAE, ENABLED, &spi->dmacr);
}
190
/* Program the interrupt mask register; mask is a REG_INT_* combination */
static inline void lld_spi_config_interrupt(sedi_spi_regs_t *spi, uint32_t mask)
{
	SEDI_PREG_SET(SPI, IMR, mask, &spi->imr);
}
195
/* Controller is busy while the BUSY flag is active or the TX FIFO
 * still holds unsent frames.
 */
static inline bool lld_spi_is_busy(sedi_spi_regs_t *spi)
{
	if (SEDI_PREG_RBFV_IS_SET(SPI, SR, BUSY, ACTIVE, &spi->sr)) {
		return true;
	}

	return SEDI_PREG_RBFV_IS_SET(SPI, SR, TFE, NOT_EMPTY, &spi->sr) ? true : false;
}
201
/* Report whether the SSI controller enable bit is currently set */
static inline bool lld_spi_is_enabled(sedi_spi_regs_t *spi)
{
	return (SEDI_PREG_RBFV_GET(SPI, SSIENR, SSI_EN, &spi->ssienr) != 0);
}
206
/* Clear all pending SPI interrupts via the read-to-clear registers and
 * return the interrupt status (ISR) sampled before clearing.
 */
static inline uint32_t lld_spi_interrupt_clear(sedi_spi_regs_t *spi)
{
	uint32_t tmp;
	uint32_t isr;

	PARAM_UNUSED(tmp);

	isr = spi->isr;
	/* Reading ICR clears the combined interrupt */
	tmp = SEDI_PREG_RBFV_GET(SPI, ICR, ICR, &spi->icr);

	/* Clear all error interrupt by read*/
	tmp = SEDI_PREG_RBFV_GET(SPI, TXOICR, TXOICR, &spi->txoicr);
	tmp = SEDI_PREG_RBFV_GET(SPI, RXOICR, RXOICR, &spi->rxoicr);
	tmp = SEDI_PREG_RBFV_GET(SPI, RXUICR, RXUICR, &spi->rxuicr);

	return isr;

}
225
/* Apply the power-on default configuration: 8-bit frames, normal
 * (non-loopback) mode, half-depth FIFO watermarks, default baud
 * prescale and slave 0 selected. Leaves the controller disabled and
 * mirrors the settings into the driver context. Always returns 0.
 */
static int lld_spi_default_config(sedi_spi_t spi_device)
{
	struct spi_context *context = &spi_contexts[spi_device];
	sedi_spi_regs_t *spi = context->base;

	/* Mid-FIFO threshold; hardware fields are zero-based */
	uint32_t watermark = SPI_FIFO_DEPTH / 2 - 1;

	uint8_t loopback = SEDI_RBFV(SPI, CTRLR0, SRL, NORMAL_MODE);
	/* DFS: Data Frame size only valid when SSI_MAX_XFER_SIZE is configured to
	 * 16, if SSI_MAX_XFER_SIZE is configured to 32, then writing to this field
	 * will not have any effect
	 * DFS_32: only valid when SSI_MAX_XFER_SIZE is configured to 32
	 */
	uint8_t width = SEDI_RBFV(SPI, CTRLR0, DFS_32, FRAME_08BITS);
	uint8_t cs_mask = SEDI_RBFV(SPI, SER, SER, SELECTED);
	uint32_t prescale = SEDI_RBFV(SPI, BAUDR, SCKDV, DEFAULT_PRESCALE);

	/* Disable SPI first */
	lld_spi_enable(spi, false);

	/* Set default SPI watermark */
	SEDI_PREG_RBF_SET(SPI, TXFTLR, TFT, watermark, &spi->txftlr);
	SEDI_PREG_RBF_SET(SPI, RXFTLR, RFT, watermark, &spi->rxftlr);
	SEDI_PREG_RBF_SET(SPI, DMATDLR, DMATDL, watermark, &spi->dmatdlr);
	SEDI_PREG_RBF_SET(SPI, DMARDLR, DMARDL, watermark, &spi->dmardlr);

	SEDI_PREG_RBF_SET(SPI, CTRLR0, DFS_32, width, &spi->ctrlr0);
	SEDI_PREG_RBF_SET(SPI, CTRLR0, SRL, loopback, &spi->ctrlr0);
	SEDI_PREG_RBF_SET(SPI, CTRLR0, SSTE, 0, &spi->ctrlr0);
	SEDI_PREG_RBF_SET(SPI, BAUDR, SCKDV, prescale, &spi->baudr);
	SEDI_PREG_RBF_SET(SPI, SER, SER, cs_mask, &spi->ser);

	/* Update context default settings */
	context->tx_watermark = watermark + 1U;	/* context keeps 1-based counts */
	context->rx_watermark = watermark + 1U;
	context->prescale = prescale;
	context->frame_size = 1U;	/* bytes per frame */
	context->is_lsb = false;

	return 0;

}
268
/* Program clock polarity (SCPOL) and clock phase (SCPH) from boolean
 * cpol/cpha arguments.
 *
 * Fix: the two register writes were swapped — the SCPOL-encoded value
 * was written into the SCPH field and vice versa, so requesting e.g.
 * CPOL=1/CPHA=0 actually configured CPOL=0/CPHA=1. Each encoded value
 * is now written into its matching CTRLR0 field.
 */
static inline void lld_spi_config_cpol_cpha(sedi_spi_regs_t *spi, int cpol, int cpha)
{
	/* Translate booleans into the register field encodings */
	cpol = cpol ? SEDI_RBFV(SPI, CTRLR0, SCPOL, SCLK_HIGH) :
		      SEDI_RBFV(SPI, CTRLR0, SCPOL, SCLK_LOW);
	cpha = cpha ? SEDI_RBFV(SPI, CTRLR0, SCPH, SCPH_START) :
		      SEDI_RBFV(SPI, CTRLR0, SCPH, SCPH_MIDDLE);

	SEDI_PREG_RBF_SET(SPI, CTRLR0, SCPH, cpha, &spi->ctrlr0);
	SEDI_PREG_RBF_SET(SPI, CTRLR0, SCPOL, cpol, &spi->ctrlr0);
}
279
/* Select loopback (test) mode or normal serial operation via CTRLR0.SRL */
static inline void lld_spi_config_loopback(sedi_spi_regs_t *spi, int loopback)
{
	int srl_mode;

	if (loopback) {
		srl_mode = SEDI_RBFV(SPI, CTRLR0, SRL, TESTING_MODE);
	} else {
		srl_mode = SEDI_RBFV(SPI, CTRLR0, SRL, NORMAL_MODE);
	}

	SEDI_PREG_RBF_SET(SPI, CTRLR0, SRL, srl_mode, &spi->ctrlr0);
}
286
/* Program the SCK divider (BAUDR.SCKDV); SPI clock = SSI_IC_FREQ / prescale */
static inline void lld_spi_config_prescale(sedi_spi_regs_t *spi, uint32_t prescale)
{
	SEDI_PREG_RBF_SET(SPI, BAUDR, SCKDV, prescale, &spi->baudr);
}
291
/* Set the data frame size; 'width' is in bits, hardware field is width-1 */
static inline void lld_spi_config_width(sedi_spi_regs_t *spi, uint8_t width)
{
	/* DFS: Data Frame size only valid when SSI_MAX_XFER_SIZE is configured to
	 * 16, if SSI_MAX_XFER_SIZE is configured to 32, then writing to this field
	 * will not have any effect
	 * DFS_32: only valid when SSI_MAX_XFER_SIZE is configured to 32
	 */
	SEDI_PREG_RBF_SET(SPI, CTRLR0, DFS_32, width - 1, &spi->ctrlr0);
}
301
/* Set TX FIFO threshold; caller passes a 1-based count, field is 0-based */
static inline void lld_spi_set_tx_watermark(sedi_spi_regs_t *spi, uint32_t watermark)
{
	SEDI_PREG_RBF_SET(SPI, TXFTLR, TFT, watermark - 1, &spi->txftlr);
}
306
/* Set RX FIFO threshold; caller passes a 1-based count, field is 0-based */
static inline void lld_spi_set_rx_watermark(sedi_spi_regs_t *spi, uint32_t watermark)
{
	SEDI_PREG_RBF_SET(SPI, RXFTLR, RFT, watermark - 1, &spi->rxftlr);
}
311
/* Program the slave-select enable mask (one bit per chip select) */
static inline void lld_spi_config_cs(sedi_spi_regs_t *spi, uint32_t cs_mask)
{
	SEDI_PREG_RBF_SET(SPI, SER, SER, cs_mask, &spi->ser);
}
316
/* Choose the controller transfer mode (duplex, TX-only or RX-only)
 * from which data buffers the caller supplied, and cache the chosen
 * TMOD value in the driver context.
 */
static void lld_spi_set_transfer_mode(sedi_spi_t spi_device,
				      IN uint8_t *data_out,
				      OUT uint8_t *data_in)
{
	struct spi_context *context = &spi_contexts[spi_device];
	sedi_spi_regs_t *spi = context->base;

	if ((data_out != NULL) && (data_in != NULL)) {
		/* Both buffers present: full duplex */
		SEDI_PREG_RBFV_SET(SPI, CTRLR0, TMOD, TX_AND_RX, &spi->ctrlr0);
		context->transfer_mode = SEDI_RBFV(SPI, CTRLR0, TMOD, TX_AND_RX);
	} else if (data_out != NULL) {
		/* No receive buffer: transmit only */
		SEDI_PREG_RBFV_SET(SPI, CTRLR0, TMOD, TX_ONLY, &spi->ctrlr0);
		context->transfer_mode = SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY);
	} else {
		/* No transmit buffer: receive only */
		SEDI_PREG_RBFV_SET(SPI, CTRLR0, TMOD, RX_ONLY, &spi->ctrlr0);
		context->transfer_mode = SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY);
	}
}
338
/* Push up to 'count' bytes into the TX FIFO from 'buff' (dummy data is
 * sent when buff is NULL) and return the number of bytes actually
 * queued. frame_size (1 or 2 bytes) decides how bytes map to FIFO
 * entries. In quad mode the opcode and address words are written first.
 */
static int lld_spi_fill_fifo(sedi_spi_t spi_device, uint8_t frame_size,
			     IN uint8_t *buff, uint32_t count)
{
	struct spi_context *context = &spi_contexts[spi_device];
	sedi_spi_regs_t *spi = context->base;
	uint32_t size = 0;
	uint32_t data = 0;

	/* In quad mode, need to send opcode and addr first */
	if ((context->is_quad) && (context->quad_config)) {
		const sedi_spi_enhanced_config_t *config = context->quad_config;

		/* Instruction need 1 entry */
		SEDI_PREG_SET(SPI, DR0, *(config->inst_buf), &spi->dr0);
		/* NOTE(review): reads addr_buf as a 32-bit word — assumes it
		 * is 4-byte aligned; confirm against callers.
		 */
		SEDI_PREG_SET(SPI, DR0, *((uint32_t *)(config->addr_buf)), &spi->dr0);
		/* After fill in addr and instruction, no need to keep quad state,
		   just transfer data as standard SPI */
		context->is_quad = false;
		context->quad_config = NULL;
		if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) {
			return 0;
		}
	}

	/* Check how many empty entry in FIFO */
	size = (SPI_FIFO_DEPTH - spi->txflr) * frame_size;

	/* Get the number which can be filled to fifo */
	size = (count > size) ? size : count;
	/* Used to return the actual fill size in bytes */
	count = size;
	while (size) {
		/* Get the data in a FIFO entry */
		if (buff) {
			switch (frame_size) {
			case SPI_FRAME_SIZE_1_BYTE:
				data = (uint32_t)(*buff);
				break;
			case SPI_FRAME_SIZE_2_BYTES:
				data = (uint32_t)(*(uint16_t *)buff);
				break;
			default:
				break;
			}
			/* Update size */
			buff += frame_size;
		} else {
			data = context->dummy_data;
		}
		/* Write data */
		SEDI_PREG_SET(SPI, DR0, data, &spi->dr0);
		size -= frame_size;
	}

	return count;
}
395
/* Drain up to 'count' bytes from the RX FIFO into 'buff' (data is
 * read and discarded when buff is NULL) and return the number of
 * bytes consumed. frame_size is 1 or 2 bytes per FIFO entry.
 */
static int lld_spi_receive_fifo(sedi_spi_regs_t *spi, uint8_t frame_size,
				OUT uint8_t *buff, uint32_t count)
{
	/* Bytes currently available in the RX FIFO */
	uint32_t size = spi->rxflr * frame_size;
	uint32_t data;

	/* Get the number which can be filled to fifo */
	size = (count > size) ? size : count;
	/* Used to return the actual fill size in bytes */
	count = size;
	while (size) {
		/* Get the data in a FIFO entry */
		data = SEDI_PREG_GET(SPI, DR0, &spi->dr0);
		if (buff) {
			switch (frame_size) {
			case SPI_FRAME_SIZE_1_BYTE:
				*buff = (data & 0xFF);
				break;
			case SPI_FRAME_SIZE_2_BYTES:
				/* Little-endian unpack of one 16-bit frame */
				buff[0] = (data & 0xFF);
				buff[1] = ((data >> 8U) & 0xFF);
				break;
			default:
				break;
			}
			/* Update size and buff */
			buff += frame_size;
		}
		size -= frame_size;
	}

	return count;
}
429
/* Return the address of the data register (DR0) to hand to the DMA
 * engine (DMA build) or for direct PIO access.
 *
 * Fix: compute the DR0 offset with the standard offsetof() macro
 * instead of the null-pointer-dereference cast idiom
 * ((sedi_spi_regs_t *)0)->dr0, which is undefined behavior in C, and
 * explicitly mark 'spi' unused in the DMA build.
 */
static inline uint32_t lld_spi_dr_address(sedi_spi_regs_t *spi)
{
#ifdef SEDI_SPI_USE_DMA
	PARAM_UNUSED(spi);
	/* NOTE(review): always uses SPI0's DMA-visible register alias
	 * regardless of which instance 'spi' refers to — confirm this is
	 * intended for SPI1 DMA transfers.
	 */
	uint32_t ret = SEDI_SPI_0_REG_DMA +
		       (uint32_t)offsetof(sedi_spi_regs_t, dr0);
	return ret;
#else
	return (uint32_t)&spi->dr0;
#endif
}
439
spi_bit_reverse(uint8_t * val,uint32_t len,uint8_t frame_size)440 static void spi_bit_reverse(uint8_t *val, uint32_t len, uint8_t frame_size)
441 {
442 if (frame_size == SPI_FRAME_SIZE_1_BYTE) {
443 msb_lsb_convert_8bits(val, len);
444 } else {
445 msb_lsb_convert_16bits((uint16_t *)val, len / frame_size);
446 }
447 }
448
/* Switch the frame format to TI SSP; the controller must be disabled
 * before CTRLR0 can be modified.
 */
static void lld_spi_set_ti_mode(sedi_spi_regs_t *spi)
{
	if (lld_spi_is_enabled(spi)) {
		lld_spi_enable(spi, false);
	}

	SEDI_PREG_RBFV_SET(SPI, CTRLR0, FRF, TEXAS_SSP, &spi->ctrlr0);
}
456
/* Switch to National Semiconductor Microwire frame format and apply
 * the handshake, data-direction and sequential-transfer options from
 * 'config'. The controller is disabled first if currently enabled.
 */
static void lld_spi_set_microwire_mode(sedi_spi_regs_t *spi,
				       sedi_spi_microwire_config_t *config)
{
	if (lld_spi_is_enabled(spi) == true) {
		lld_spi_enable(spi, false);
	}

	SEDI_PREG_RBFV_SET(SPI, CTRLR0, FRF, NS_MICROWIRE, &spi->ctrlr0);

	/* Configure microwire mode */
	SEDI_PREG_RBF_SET(SPI, MWCR, MHS, config->microwire_handshake, &spi->mwcr);
	SEDI_PREG_RBF_SET(SPI, MWCR, MDD, config->data_direction_tx, &spi->mwcr);
	SEDI_PREG_RBF_SET(SPI, MWCR, MWMOD, config->sequential_mode, &spi->mwcr);
}
471
/* Select the SPI line mode (single/dual/quad/octal) via CTRLR0.SPI_FRF.
 * No-op on controllers built without enhanced SPI support.
 */
static void lld_spi_set_line_mode(sedi_spi_regs_t *spi, spi_line_mode_t mode)
{
	/* SPI_FRF: SPI Frame Format Bits RO and only valid when SSI_SPI_MODE is
	 * either set to "Dual" or "Quad" or "Octal" mode, so add #ifdef SPI_DW_2_0
	 */
#ifdef SPI_DW_2_0
	/* Controller must be disabled while changing the frame format */
	lld_spi_enable(spi, false);

	SEDI_PREG_RBF_SET(SPI, CTRLR0, SPI_FRF, mode, &spi->ctrlr0);
#endif
}
483
484 #ifdef SPI_DW_2_0
/* Program the TX start threshold (TXFTLR.TFT) so transmission begins
 * only once enough frames for the 'num'-byte transfer (capped at the
 * FIFO depth) are queued, improving bus efficiency.
 * NOTE(review): assumes num >= frame_size; start_frame == 0 would make
 * 'start_frame - 1' wrap — confirm callers guarantee a non-empty transfer.
 */
static void dw_spi_set_start_condition(struct spi_context *context, uint32_t num)
{
	sedi_spi_regs_t *spi = context->base;
	uint32_t start_frame = 0;

	/* Set the send start condition to improve efficiency */
	if (context->quad_config) {
		/* enhanced mode includes 2 frames for opcode and addr */
		start_frame = num / (context->frame_size) + 2;
	} else {
		start_frame = num / (context->frame_size);
	}

	/* Compare with FIFO depth */
	if (start_frame < SPI_FIFO_DEPTH) {
		SEDI_PREG_RBF_SET(SPI, TXFTLR, TFT, start_frame - 1, &spi->txftlr);
	} else {
		SEDI_PREG_RBF_SET(SPI, TXFTLR, TFT, SPI_FIFO_DEPTH - 1, &spi->txftlr);
	}
}
505
506 #endif
507 /******************************************************************************
508 * SEDI interface
509 *****************************************************************************/
510
/* Return the API/driver version pair for this SPI driver */
sedi_driver_version_t sedi_spi_get_version(void)
{
	return driver_version;
}
515
/* Report the capabilities of one SPI instance. Availability reflects
 * whether this core currently owns the device.
 */
int32_t sedi_spi_get_capabilities(IN sedi_spi_t spi_device,
				  sedi_spi_capabilities_t *cap)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	sedi_spi_capabilities_t *drv_cap = &driver_capabilities[spi_device];

	drv_cap->is_available =
		sedi_dev_is_self_owned(SEDI_DEVID_SPI0 + spi_device) ? 1 : 0;

	/* TI SSP and Microwire frame formats supported; no mode fault */
	drv_cap->ti_ssi = 1;
	drv_cap->microwire = 1;
	drv_cap->mode_fault = 0;

	*cap = *drv_cap;

	return SEDI_DRIVER_OK;
}
535
/* Bind an event callback, its parameter and the register base address
 * to a SPI instance. Fails with SEDI_DRIVER_ERROR_NO_DEV when this
 * core does not own the device.
 */
int32_t sedi_spi_init(IN sedi_spi_t spi_device, IN sedi_spi_event_cb_t cb_event,
		      INOUT void *param, IN uint32_t base)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	if (!sedi_dev_is_self_owned(SEDI_DEVID_SPI0 + spi_device)) {
		return SEDI_DRIVER_ERROR_NO_DEV;
	}

	struct spi_context *ctx = &spi_contexts[spi_device];

	ctx->cb_event = cb_event;
	ctx->cb_param = param;
	ctx->base = (sedi_spi_regs_t *)base;

	return SEDI_DRIVER_OK;
}
554
/* Release a SPI instance: drop the registered callback and reset the
 * bit-order setting to MSB-first.
 */
int32_t sedi_spi_uninit(IN sedi_spi_t spi_device)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	struct spi_context *ctx = &spi_contexts[spi_device];

	ctx->cb_event = NULL;
	ctx->is_lsb = false;

	return SEDI_DRIVER_OK;
}
566
/* Change the device power state. FULL powers up and re-applies the
 * hardware defaults; the suspend/low states quiesce the controller
 * before dropping power. OFF is not supported.
 */
int32_t sedi_spi_set_power(IN sedi_spi_t spi_device,
			   IN sedi_power_state_t state)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	sedi_devid_t id = SEDI_DEVID_SPI0 + spi_device;
	struct spi_context *context = &spi_contexts[spi_device];
	int32_t ret = SEDI_DRIVER_OK;

	switch (state) {
	case SEDI_POWER_FULL:
		sedi_pm_set_device_power(id, state);
		/* Restore hardware defaults after power-up */
		lld_spi_default_config(spi_device);
		break;
	case SEDI_POWER_SUSPEND:
	case SEDI_POWER_FORCE_SUSPEND:
	case SEDI_POWER_LOW:
		/* Disable the controller before reducing power */
		lld_spi_enable(context->base, false);
		sedi_pm_set_device_power(id, state);
		break;
	case SEDI_POWER_OFF:
	default:
		ret = SEDI_DRIVER_ERROR_UNSUPPORTED;
		break;
	}

	return ret;
}
594
/* Return the number of bytes completed so far for the active
 * direction: TX progress when a TX buffer is set, otherwise RX.
 */
int32_t sedi_spi_get_data_count(IN sedi_spi_t spi_device)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	struct spi_context *ctx = &spi_contexts[spi_device];

	return ctx->data_tx ? ctx->data_tx_idx : ctx->data_rx_idx;
}
607
/* Snapshot the driver status flags together with the live hardware
 * registers (ISR, SR and both FIFO level registers) into *status.
 */
int32_t sedi_spi_get_status(IN sedi_spi_t spi_device, sedi_spi_status_t *status)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(NULL != status, SEDI_DRIVER_ERROR_PARAMETER);

	struct spi_context *context = &spi_contexts[spi_device];
	sedi_spi_regs_t *reg = context->base;

	/* Driver-maintained state */
	status->busy = context->status.busy;
	status->data_lost = context->status.data_lost;
	status->mode_fault = context->status.mode_fault;
	/* Live hardware state */
	status->isr = reg->isr;
	status->sr = reg->sr;
	status->txflr = reg->txflr;
	status->rxflr = reg->rxflr;

	return SEDI_DRIVER_OK;
}
626
/* Runtime configuration entry point. 'control' selects the operation
 * (SEDI_SPI_IOCTL_*); 'arg' is interpreted per operation as either a
 * plain value or a pointer cast to uint32_t (the *_GET operations).
 */
int32_t sedi_spi_control(IN sedi_spi_t spi_device, IN uint32_t control,
			 IN uint32_t arg)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(control < SEDI_SPI_IOCTL_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	struct spi_context *context = &spi_contexts[spi_device];

	switch (control) {
	case SEDI_SPI_IOCTL_CPOL0_CPHA0:
		lld_spi_config_cpol_cpha(context->base, 0, 0);
		break;
	case SEDI_SPI_IOCTL_CPOL0_CPHA1:
		lld_spi_config_cpol_cpha(context->base, 0, 1);
		break;
	case SEDI_SPI_IOCTL_CPOL1_CPHA0:
		lld_spi_config_cpol_cpha(context->base, 1, 0);
		break;
	case SEDI_SPI_IOCTL_CPOL1_CPHA1:
		lld_spi_config_cpol_cpha(context->base, 1, 1);
		break;
	case SEDI_SPI_IOCTL_TI_SSI:
		lld_spi_set_ti_mode(context->base);
		break;
	case SEDI_SPI_IOCTL_MICROWIRE:
		/* arg is a pointer to a microwire config structure */
		lld_spi_set_microwire_mode(context->base,
					   (sedi_spi_microwire_config_t *)arg);
		break;
	case SEDI_SPI_IOCTL_MSB:
		context->is_lsb = false;
		break;
	case SEDI_SPI_IOCTL_LSB:
		/* LSB-first is emulated by software bit reversal */
		context->is_lsb = true;
		break;
	case SEDI_SPI_IOCTL_DATA_WIDTH:
		DBG_CHECK(((arg == SPI_BITWIDTH_4BITS) ||
			   (arg == SPI_BITWIDTH_8BITS) ||
			   (arg == SPI_BITWIDTH_16BITS)),
			  SEDI_DRIVER_ERROR_PARAMETER);
		/* Track the frame size in whole bytes */
		context->frame_size = (uint8_t)arg / 8U;
		/* For 4 bits operation, operate like 8 bits */
		if (context->frame_size == 0) {
			context->frame_size = SPI_FRAME_SIZE_1_BYTE;
		}
		lld_spi_config_width(context->base, (uint8_t)arg);
		break;
	case SEDI_SPI_IOCTL_CS_UNUSED:
		lld_spi_config_cs(context->base, 0U);
		break;
	case SEDI_SPI_IOCTL_CS_HW:
		lld_spi_config_cs(context->base, (uint32_t)arg);
		break;
	case SEDI_SPI_IOCTL_SPEED_SET:
		/* arg is the desired SCK frequency; clamp the divider */
		context->prescale = SSI_IC_FREQ / (uint32_t)arg;
		if (context->prescale < SEDI_RBFV(SPI, BAUDR, SCKDV, MIN_PRESCALE)) {
			context->prescale = SEDI_RBFV(SPI, BAUDR, SCKDV, MIN_PRESCALE);
		}
		lld_spi_config_prescale(context->base, context->prescale);
		break;
	case SEDI_SPI_IOCTL_TX_WATERMARK_SET:
		context->tx_watermark = (uint32_t)arg;
		lld_spi_set_tx_watermark(context->base, (uint32_t)arg);
		break;
	case SEDI_SPI_IOCTL_RX_WATERMARK_SET:
		context->rx_watermark = (uint32_t)arg;
		lld_spi_set_rx_watermark(context->base, (uint32_t)arg);
		break;
	case SEDI_SPI_IOCTL_DUMMY_DATA:
		context->dummy_data = (uint32_t)arg;
		break;
	case SEDI_SPI_IOCTL_LOOPBACK:
		lld_spi_config_loopback(context->base, (bool)arg);
		break;
	case SEDI_SPI_IOCTL_SPEED_GET:
		/* arg is a pointer to receive the current SCK frequency */
		*((uint32_t *)arg) = SSI_IC_FREQ / context->prescale;
		break;
	case SEDI_SPI_IOCTL_TX_WATERMARK_GET:
		*((uint32_t *)arg) = context->tx_watermark;
		break;
	case SEDI_SPI_IOCTL_RX_WATERMARK_GET:
		*((uint32_t *)arg) = context->rx_watermark;
		break;
	case SEDI_SPI_IOCTL_ABORT:
		/* Stop the controller and drop any in-flight transfer */
		lld_spi_enable(context->base, false);
		lld_spi_config_interrupt(context->base, REG_INT_NONE);
#ifdef SEDI_SPI_USE_DMA
		lld_spi_dma_enable(context->base, false);
#endif
		context->status.busy = 0;
		break;
	case SEDI_SPI_IOCTL_BUFFER_SETS:
		context->is_cs_continuous = (bool)arg;
		break;
	case SEDI_SPI_IOCTL_LINE_MODE:
		lld_spi_set_line_mode(context->base, (spi_line_mode_t)arg);
		break;
	default:
		break;
	}

	return SEDI_DRIVER_OK;
}
729
730 #ifdef SEDI_SPI_USE_DMA
731 static void callback_dma_transfer(const sedi_dma_t dma, const int chan,
732 const int event, void *param);
733
/* Initialize, power up and fully configure one DMA channel for a SPI
 * transfer: burst length 1, source/destination transfer width derived
 * from 'width' (bytes), hardware handshake and direction per 'is_tx'.
 * The channel is left ready for sedi_dma_start_transfer(). Returns 0
 * on success; DBG_CHECK reports SEDI_DRIVER_ERROR on any failed step.
 */
static int config_and_enable_dma_channel(sedi_spi_t spi_dev, int dma,
					 int handshake, int chan, int width,
					 int burst, uint32_t src, uint32_t dst,
					 uint32_t len, int is_tx)
{
	int ret;
	int dma_dir;
	int dma_per_dir;
	dma_transfer_width_t wid = DMA_TRANS_WIDTH_8;

	PARAM_UNUSED(
		burst); /* Set burst to 1 to finish transfer all data size */
	/* NOTE(review): src, dst and len are also unused here — the actual
	 * addresses/length go to sedi_dma_start_transfer(); confirm.
	 */

	if (is_tx) {
		dma_dir = DMA_MEMORY_TO_PERIPHERAL;
		dma_per_dir = DMA_HS_PER_TX;
	} else {
		dma_dir = DMA_PERIPHERAL_TO_MEMORY;
		dma_per_dir = DMA_HS_PER_RX;
	}

	/* Map byte width to the DMA transfer-width encoding */
	switch (width) {
	case 1:
		wid = DMA_TRANS_WIDTH_8;
		break;
	case 2:
		wid = DMA_TRANS_WIDTH_16;
		break;
	case 4:
		wid = DMA_TRANS_WIDTH_32;
		break;
	default:
		break;
	}

	/* Bind the completion callback; spi_dev is passed back as 'param' */
	ret = sedi_dma_init(dma, chan, callback_dma_transfer, (void *)spi_dev);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_set_power(dma, chan, SEDI_POWER_FULL);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_BURST_LENGTH,
			       DMA_BURST_TRANS_LENGTH_1);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_SR_TRANS_WIDTH, wid);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_DT_TRANS_WIDTH, wid);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_HS_DEVICE_ID,
			       handshake);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_HS_POLARITY,
			       DMA_HS_POLARITY_HIGH);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_DIRECTION, dma_dir);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_HS_DEVICE_ID_PER_DIR,
			       dma_per_dir);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);


	return 0;
}
803
/* Per-channel DMA completion callback. Releases the finished channel,
 * restores LSB bit order on the user buffers if requested, chains the
 * next SPI_DMA_MAX_SIZE block for transfers split across several DMA
 * runs, and fires the user callback (COMPLETE or DATA_LOST) when the
 * whole transfer ends.
 */
static void callback_dma_transfer(const sedi_dma_t dma, const int chan,
				  const int event, void *param)
{
	/* 'param' carries the SPI instance bound at sedi_dma_init() time */
	sedi_spi_t spi_device = (sedi_spi_t)param;

	struct spi_context *context = &spi_contexts[spi_device];
	uint32_t len = SPI_DMA_MAX_SIZE;

	/* release the dma resource */
	sedi_dma_set_power(dma, chan, SEDI_POWER_OFF);
	sedi_dma_uninit(dma, chan);

	if (event != SEDI_DMA_EVENT_TRANSFER_DONE) {
		/* Any non-done event is treated as data lost */
		if (context->cb_event) {
			context->cb_event(SEDI_SPI_EVENT_DATA_LOST,
					  context->cb_param);
		}

		goto f_out;
	}

	/* See tx or rx finished */
	if (chan == context->tx_channel) {
		context->dma_tx_finished = true;
		context->data_tx_idx = context->tx_data_len;
		/* Recover LSB reverse, DMA mode tx buff pointer not changed */
		if (context->is_lsb == true) {
			spi_bit_reverse(context->data_tx, context->tx_data_len,
					context->frame_size);
			sedi_core_clean_dcache_by_addr(
				(uint32_t *)(context->data_tx),
				context->tx_data_len);
		}
		/* Waiting for TX FIFO empty */
		while (lld_spi_is_busy(context->base)) {
			;
		}
	} else if (chan == context->rx_channel) {
		context->dma_rx_finished = true;
		context->data_rx_idx = context->rx_data_len;
		/* If finished Rx, and need to do bit convert */
		if (context->is_lsb == true) {
#ifndef SEDI_CONFIG_ARCH_X86
			/* Invalidate cache */
			sedi_core_inv_dcache_by_addr(
				(uint32_t *)(context->data_rx),
				context->rx_data_len);
#endif
			spi_bit_reverse(context->data_rx, context->rx_data_len,
					context->frame_size);
			sedi_core_clean_dcache_by_addr(
				(uint32_t *)(context->data_rx),
				context->rx_data_len);
		}
	}

	/* Wait until both directions are done before chaining or finishing */
	if ((context->dma_tx_finished == false) ||
	    (context->dma_rx_finished == false)) {
		return;
	}

	/* If need to start another DMA transfer */
	context->dma_idx -= 1;
	if (context->dma_idx > 0) {
		/* Final block may be shorter than SPI_DMA_MAX_SIZE */
		if (context->dma_idx == 1) {
			len = context->last_dma_counts;
		}
		/* According to different transfer mode, do different fill or receive */
		if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY)) {
			context->data_tx += SPI_DMA_MAX_SIZE;
			context->dma_tx_finished = false;
			/* start dma first */
			config_and_enable_dma_channel(spi_device, context->tx_dma,
						      context->dma_handshake, context->tx_channel,
						      0, 1, (uint32_t)(context->data_tx),
						      lld_spi_dr_address(context->base), len, true);
			sedi_dma_start_transfer(context->tx_dma, context->tx_channel,
						(uint32_t)(context->data_tx),
						lld_spi_dr_address(context->base), len);

		} else if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) {
			context->data_rx += SPI_DMA_MAX_SIZE;
			context->dma_rx_finished = false;
			/* Configure rx channel */
			context->base->ctrlr1 = len / context->frame_size - 1;
			/* NOTE(review): the transfer is started before the channel
			 * is (re)configured here — opposite order to the TX path;
			 * confirm the intended sequencing.
			 */
			sedi_dma_start_transfer(context->rx_dma, context->rx_channel,
						lld_spi_dr_address(context->base),
						(uint32_t)(context->data_rx), len);
			config_and_enable_dma_channel(spi_device, context->rx_dma,
						      context->rx_handshake, context->rx_channel, 0,
						      1, lld_spi_dr_address(context->base),
						      (uint32_t)(context->data_rx), len, false);

		} else {
			context->data_tx += SPI_DMA_MAX_SIZE;
			context->data_rx += SPI_DMA_MAX_SIZE;
			context->dma_tx_finished = false;
			context->dma_rx_finished = false;
			/* Enable both channel to do transfer */
			config_and_enable_dma_channel(spi_device, context->tx_dma,
						      context->dma_handshake, context->tx_channel,
						      0, 1, (uint32_t)(context->data_tx),
						      lld_spi_dr_address(context->base), len, true);
			config_and_enable_dma_channel(spi_device, context->rx_dma,
						      context->rx_handshake, context->rx_channel, 0,
						      1, lld_spi_dr_address(context->base),
						      (uint32_t)(context->data_rx), len, false);
			/* Enable both channel and start rx firstly to do transfer */
			sedi_dma_start_transfer(context->rx_dma, context->rx_channel,
						lld_spi_dr_address(context->base),
						(uint32_t)(context->data_rx), len);
			sedi_dma_start_transfer(context->tx_dma, context->tx_channel,
						(uint32_t)(context->data_tx),
						lld_spi_dr_address(context->base), len);
		}

		/* Return to start another transfer */
		return;

	}

	/* All tx and rx finished */
	if (context->cb_event) {
		context->cb_event(SEDI_SPI_EVENT_COMPLETE, context->cb_param);
	}

f_out:
	/* clear spi busy status and disable spi dma*/
	context->status.busy = 0;
	lld_spi_config_interrupt(context->base, REG_INT_NONE);
	lld_spi_enable(context->base, false);
	lld_spi_dma_enable(context->base, false);
}
937 #endif
938
/**
 * Start a DMA-based SPI transfer.
 *
 * Transfers larger than the DMA block limit (SPI_DMA_MAX_SIZE, 4 KB) are
 * split into multiple DMA cycles; the remaining cycles are chained from the
 * DMA completion callback using the dma_idx/dma_cycles bookkeeping set here.
 *
 * @param spi_device  SPI instance, must be < SEDI_SPI_NUM
 * @param tx_dma      DMA controller used for transmit
 * @param tx_dma_chan DMA channel used for transmit
 * @param data_out    TX buffer, or NULL for a receive-only transfer
 * @param rx_dma      DMA controller used for receive
 * @param rx_dma_chan DMA channel used for receive
 * @param data_in     RX buffer, or NULL for a transmit-only transfer
 * @param num         Number of bytes, must be a multiple of the frame size
 *
 * @return SEDI_DRIVER_OK on success, SEDI_DRIVER_ERROR_BUSY while another
 *         transfer is in flight, SEDI_DRIVER_ERROR_PARAMETER on bad input
 */
int32_t sedi_spi_dma_transfer(IN sedi_spi_t spi_device, IN uint32_t tx_dma,
			      IN uint32_t tx_dma_chan, IN uint8_t *data_out,
			      IN uint32_t rx_dma, IN uint32_t rx_dma_chan,
			      OUT uint8_t *data_in, IN uint32_t num)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
#ifdef SEDI_SPI_USE_DMA
	struct spi_context *context = &spi_contexts[spi_device];
	sedi_spi_regs_t *spi = context->base;
	int tx_handshake = context->dma_handshake;
	int rx_handshake = context->rx_handshake;
	int width = context->frame_size;
	int burst = 1;
	uint32_t len = num;

	DBG_CHECK(((num % context->frame_size) == 0),
		  SEDI_DRIVER_ERROR_PARAMETER);

	if (context->status.busy) {
		return SEDI_DRIVER_ERROR_BUSY;
	}

	context->status.busy = 1U;

	/* TX DMA request fires while FIFO has room; RX as soon as data lands */
	context->base->dmatdlr = SPI_FIFO_DEPTH - 1;
	context->base->dmardlr = 0;
	context->tx_dma = tx_dma;
	context->rx_dma = rx_dma;
	context->tx_channel = tx_dma_chan;
	context->rx_channel = rx_dma_chan;
	context->dma_tx_finished = false;
	context->dma_rx_finished = false;
	context->tx_data_len = num;
	context->rx_data_len = num;
	context->data_tx = (uint8_t *)data_out;
	context->data_rx = data_in;
	/* DMA BLOCK TS only 4096, for large data more than 4K, use multiple transfer */
	context->last_dma_counts = (num & (SPI_DMA_MAX_SIZE - 1));
	if (context->last_dma_counts == 0) {
		context->dma_cycles = num >> SPI_DMA_MAX_SIZE_SHIFT;
		context->last_dma_counts = SPI_DMA_MAX_SIZE;
	} else {
		context->dma_cycles = (num >> SPI_DMA_MAX_SIZE_SHIFT) + 1;
	}
	context->dma_idx = context->dma_cycles;

	if (context->dma_cycles > 1) {
		len = SPI_DMA_MAX_SIZE;
	}
#ifdef SPI_DW_2_0
	/* Clear the bit field */
	SEDI_PREG_RBF_SET(SPI, TXFTLR, TFT, 0, &context->base->txftlr);
#endif

	/* Decide the transfer mode, send, receive or both */
	lld_spi_set_transfer_mode(spi_device, data_out, data_in);

	/* If need to bit reverse tx buffer: LSB-first mode is emulated in
	 * software by reversing each frame before the hardware sees it
	 */
	if (context->is_lsb == true) {
		if (context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) {
			spi_bit_reverse(context->data_tx, context->tx_data_len,
					context->frame_size);
			/* Clean the cache for DMA transfer */
			sedi_core_clean_dcache_by_addr(
				(uint32_t *)(context->data_tx),
				context->tx_data_len);
		}
#ifdef SEDI_CONFIG_ARCH_X86
		if (context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY)) {
			sedi_core_inv_clean_dcache_by_addr(
				(uint32_t *)(context->data_rx),
				context->rx_data_len);
		}
#endif
	}

	/* According to different transfer mode, do different fill or receive */
	if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY)) {
		/* start dma first */
		config_and_enable_dma_channel(
			spi_device, tx_dma, tx_handshake, tx_dma_chan, width, burst,
			(uint32_t)data_out, lld_spi_dr_address(context->base), len,
			true);
		context->dma_rx_finished = true;
		/* 0xFF marks the RX channel unused for this transfer */
		context->rx_channel = 0xFF;
#ifdef SPI_DW_2_0
		dw_spi_set_start_condition(context, len);
#endif
	} else if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) {
		/* Send dummy data first */
		if (context->is_quad == false) {
			lld_spi_fill_fifo(spi_device, context->frame_size, NULL,
					  context->frame_size);
		}

		/* Configure rx channel */
		config_and_enable_dma_channel(spi_device, rx_dma, rx_handshake,
					      rx_dma_chan, width, burst,
					      lld_spi_dr_address(context->base),
					      (uint32_t)data_in, len, false);
		/* Set NDF bits for receive only mode */
		DBG_CHECK((len <= SPI_RECEIVE_MODE_MAX_SIZE),
			  SEDI_DRIVER_ERROR_PARAMETER);
		context->base->ctrlr1 = len / context->frame_size - 1;
		context->dma_tx_finished = true;
		/* 0xFF marks the TX channel unused for this transfer */
		context->tx_channel = 0xFF;
	} else {
		/* Enable both channel to do transfer */
		config_and_enable_dma_channel(
			spi_device, tx_dma, tx_handshake, tx_dma_chan, width, burst,
			(uint32_t)data_out, lld_spi_dr_address(context->base), len,
			true);
		config_and_enable_dma_channel(spi_device, rx_dma, rx_handshake,
					      rx_dma_chan, width, burst,
					      lld_spi_dr_address(context->base),
					      (uint32_t)data_in, len, false);
#ifdef SPI_DW_2_0
		dw_spi_set_start_condition(context, len);
#endif
	}

	/* Only error interrupts are needed; completion comes via DMA callbacks */
	lld_spi_config_interrupt(context->base, REG_INT_ERROR);

	lld_spi_dma_enable(context->base, true);

	lld_spi_enable(context->base, true);

	if ((context->is_quad) && (context->quad_config)) {
		const sedi_spi_enhanced_config_t *config = context->quad_config;

		/* Instruction need 1 entry */
		SEDI_PREG_SET(SPI, DR0, *(config->inst_buf), &spi->dr0);
		/* NOTE(review): assumes addr_buf holds a full 32-bit word and is
		 * suitably aligned — confirm against callers
		 */
		SEDI_PREG_SET(SPI, DR0, *((uint32_t *)(config->addr_buf)), &spi->dr0);
		/* After fill in addr and instruction, no need to keep quad state,
		   just transfer data as standard SPI */
		context->is_quad = false;
		context->quad_config = NULL;
	}

	if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY)) {
		sedi_dma_start_transfer(tx_dma, tx_dma_chan, (uint32_t)data_out,
					lld_spi_dr_address(context->base), len);
	} else if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) {
		sedi_dma_start_transfer(rx_dma, rx_dma_chan, lld_spi_dr_address(context->base),
					(uint32_t)data_in, len);
	} else {
		/* the SPI transaction may interrupted by some other events between Tx/Rx dma
		 * enable, which probably lead to rx fifo overflow, start rx channel firstly.
		 */
		sedi_dma_start_transfer(rx_dma, rx_dma_chan, lld_spi_dr_address(context->base),
					(uint32_t)data_in, len);
		sedi_dma_start_transfer(tx_dma, tx_dma_chan, (uint32_t)data_out,
					lld_spi_dr_address(context->base), len);
	}

#endif
	return SEDI_DRIVER_OK;
}
1097
/**
 * Perform a blocking (polled) SPI transfer.
 *
 * Fills and drains the controller FIFOs in a busy loop until @p num bytes
 * have been sent and/or received, then waits for the controller to go idle.
 *
 * @param spi_device SPI instance, must be < SEDI_SPI_NUM
 * @param data_out   TX buffer, or NULL for a receive-only transfer
 * @param data_in    RX buffer, or NULL for a transmit-only transfer
 * @param num        Number of bytes, must be a multiple of the frame size
 *
 * @return SEDI_DRIVER_OK on success, SEDI_DRIVER_ERROR_BUSY if a transfer
 *         is already in progress, SEDI_DRIVER_ERROR_PARAMETER on bad input
 */
int32_t sedi_spi_poll_transfer(IN sedi_spi_t spi_device, IN uint8_t *data_out,
			       OUT uint8_t *data_in, IN uint32_t num)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	uint32_t tx_num = num, rx_num = num, fill_num = 0, receive_num = 0;
	struct spi_context *context = &spi_contexts[spi_device];

	DBG_CHECK(((num % context->frame_size) == 0),
		  SEDI_DRIVER_ERROR_PARAMETER);

	if (context->status.busy) {
		return SEDI_DRIVER_ERROR_BUSY;
	}

	context->status.busy = 1U;
	context->data_tx = (void *)data_out;
	context->data_rx = (void *)data_in;
	context->tx_data_len = num;
	context->rx_data_len = num;
	context->data_tx_idx = 0;
	context->data_rx_idx = 0;
#ifdef SPI_DW_2_0
	/* Clear the bit field */
	SEDI_PREG_RBF_SET(SPI, TXFTLR, TFT, 0, &context->base->txftlr);
#endif

	/* Decide the transfer mode, send, receive or both */
	lld_spi_set_transfer_mode(spi_device, data_out, data_in);

	/* First convert tx buffer: LSB-first is emulated by bit-reversing each
	 * frame in software; the buffers are reversed back after the transfer
	 */
	if ((context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) &&
	    (context->is_lsb == true)) {
		spi_bit_reverse(context->data_tx, context->tx_data_len,
				context->frame_size);
	}

	/* According to different transfer mode, do different fill or receive */
	if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY)) {
		rx_num = 0;
	} else if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) {
		tx_num = context->frame_size; /* Shall send at least one data
						 for receive */
		DBG_CHECK((num <= SPI_RECEIVE_MODE_MAX_SIZE),
			  SEDI_DRIVER_ERROR_PARAMETER);
		/* CTRLR1 holds number of frames to receive minus one */
		context->base->ctrlr1 = num / context->frame_size - 1;
	}

	lld_spi_enable(context->base, true);

	/* First send some data in both transfer mode */
	fill_num = lld_spi_fill_fifo(spi_device, context->frame_size, data_out,
				     tx_num);
	/* Update buffer and number */
	if (data_out) {
		data_out += fill_num;
	}
	tx_num -= fill_num;

	while ((tx_num > 0) || (rx_num > 0)) {
		if (tx_num > 0) {
			/* First send some data */
			fill_num = lld_spi_fill_fifo(
				spi_device, context->frame_size, data_out, tx_num);
			/* Update buffer and number */
			data_out += fill_num;
			tx_num -= fill_num;
		}

		if (rx_num > 0) {
			/* Receive some data */
			receive_num = lld_spi_receive_fifo(context->base,
							   context->frame_size,
							   data_in, rx_num);
			data_in += receive_num;
			rx_num -= receive_num;
		}
	}

	/* Waiting for SPI idle */
	while (lld_spi_is_busy(context->base))
		;
	lld_spi_enable(context->base, false);

	context->status.busy = 0U;
	context->data_tx_idx = num;
	context->data_rx_idx = num;

	/* If has rx buffer and need bit reverse */
	if ((context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY)) &&
	    (context->is_lsb == true)) {
		spi_bit_reverse(context->data_rx, context->rx_data_len,
				context->frame_size);
	}

	/* If need to recover tx buffer */
	if ((context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) &&
	    (context->is_lsb == true)) {
		spi_bit_reverse(context->data_tx, context->tx_data_len,
				context->frame_size);
	}
	return SEDI_DRIVER_OK;
}
1201
/**
 * Start an interrupt-driven SPI transfer.
 *
 * Sets up the context and watermarks, then enables TX/RX/error interrupts;
 * the remaining work (FIFO fill/drain, completion callback) happens in
 * spi_isr().
 *
 * @param spi_device SPI instance, must be < SEDI_SPI_NUM
 * @param data_out   TX buffer, or NULL for a receive-only transfer
 * @param data_in    RX buffer, or NULL for a transmit-only transfer
 * @param num        Number of bytes, must be a multiple of the frame size
 *
 * @return SEDI_DRIVER_OK on success, SEDI_DRIVER_ERROR_BUSY if a transfer
 *         is already in progress, SEDI_DRIVER_ERROR_PARAMETER on bad input
 */
int32_t sedi_spi_transfer(IN sedi_spi_t spi_device, IN uint8_t *data_out,
			  OUT uint8_t *data_in, IN uint32_t num)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	struct spi_context *context = &spi_contexts[spi_device];
	sedi_spi_regs_t *spi = context->base;
	uint32_t send_count = num;

	DBG_CHECK(((num % context->frame_size) == 0),
		  SEDI_DRIVER_ERROR_PARAMETER);

	if (context->status.busy) {
		return SEDI_DRIVER_ERROR_BUSY;
	}

	/* For transfer size less than watermark */
	if (num < context->rx_watermark * context->frame_size) {
		/* Only shall reset the receive watermark to finish trigger
		 * interrupt */
		lld_spi_set_rx_watermark(context->base,
					 num / context->frame_size);
	} else {
		lld_spi_set_rx_watermark(context->base,
					 (SPI_FIFO_DEPTH / 2 - 1) *
					 context->frame_size);
	}

	lld_spi_set_transfer_mode(spi_device, data_out, data_in);

	/* For IRQ mode only, if use multiple buffers, cannot change mode in
	 * transfer */
	if ((context->is_cs_continuous == true) && (!context->is_quad)) {
		SEDI_PREG_RBFV_SET(SPI, CTRLR0, TMOD, TX_AND_RX, &spi->ctrlr0);
		context->transfer_mode = SEDI_RBFV(SPI, CTRLR0, TMOD, TX_AND_RX);
	}

	context->status.busy = 1U;

	context->data_tx = (void *)data_out;
	context->data_rx = (void *)data_in;
	context->tx_data_len = num;
	context->rx_data_len = num;
	context->data_tx_idx = 0;
	context->data_rx_idx = 0;
#ifdef SPI_DW_2_0
	/* Clear the bit field */
	SEDI_PREG_RBF_SET(SPI, TXFTLR, TFT, 0, &spi->txftlr);
#endif

	/* First convert tx buffer: LSB-first is emulated by bit-reversing each
	 * frame in software (restored again in the ISR on completion)
	 */
	if ((context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) &&
	    (context->is_lsb == true)) {
		spi_bit_reverse(context->data_tx, context->tx_data_len,
				context->frame_size);
	}

	/* According to different transfer mode, do different fill or receive */
	if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY)) {
		context->data_rx_idx = num;
	} else if (context->transfer_mode == SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) {
		send_count = context->frame_size;
		DBG_CHECK((num <= SPI_RECEIVE_MODE_MAX_SIZE),
			  SEDI_DRIVER_ERROR_PARAMETER);
		/* CTRLR1 holds number of frames to receive minus one */
		context->base->ctrlr1 = num / context->frame_size - 1;
		/* Write into FIFO needs to enable SPI first */
		lld_spi_enable(context->base, true);
		lld_spi_fill_fifo(spi_device, context->frame_size, data_out,
				  send_count);
		context->data_tx_idx = num;
	}

#ifdef SPI_DW_2_0
	if (context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) {
		dw_spi_set_start_condition(context, num);
	}
#endif

	lld_spi_enable(context->base, true);

	lld_spi_config_interrupt(context->base,
				 REG_INT_TX | REG_INT_RX | REG_INT_ERROR);

	return SEDI_DRIVER_OK;
}
1287
/**
 * Apply enhanced-mode (dual/quad/octal) configuration before a transfer.
 *
 * A no-op when the controller is in standard single-lane frame format.
 * Otherwise validates the config (an instruction phase is mandatory, the
 * address phase is optional), latches it in the context for the transfer
 * start path, and programs the SPI_CTRLR0 phase-length fields.
 *
 * @param spi_device SPI instance (caller validates the range)
 * @param config     Enhanced transfer description; must be non-NULL with a
 *                   non-empty instruction buffer in enhanced mode
 *
 * @return SEDI_DRIVER_OK (0) on success,
 *         SEDI_DRIVER_ERROR_PARAMETER on invalid configuration
 */
static int32_t spi_enhanced_config(IN sedi_spi_t spi_device,
				   IN sedi_spi_enhanced_config_t *config)
{
	struct spi_context *context = &spi_contexts[spi_device];
	sedi_spi_regs_t *spi = context->base;

	if (SEDI_PREG_RBFV_IS_SET(SPI, CTRLR0, SPI_FRF, STD_SPI_FRF, &spi->ctrlr0)) {
		/* single mode no need to configure */
		return SEDI_DRIVER_OK;
	}

	/* inst is must, address is option; guard config itself before use */
	if ((config == NULL) || (config->inst_buf == NULL) ||
	    (config->inst_len == 0)) {
		return SEDI_DRIVER_ERROR_PARAMETER;
	}

	/* Remember the quad setup; consumed when the transfer is started */
	context->is_quad = true;
	context->quad_config = config;

	/* Disable spi first to set registers */
	lld_spi_enable(spi, false);
	/* add SPI_DW_2_0 here as sedi_spi_reg.h osxml has no SPI_CTRL0*/
#ifdef SPI_DW_2_0
	/* Config SPI_CTRL0 register */
	SEDI_PREG_RBF_SET(SPI, SPI_CTRLR0, ADDR_L, config->addr_len, &spi->spi_ctrl0);
	SEDI_PREG_RBF_SET(SPI, SPI_CTRLR0, INST_L, config->inst_len, &spi->spi_ctrl0);
	SEDI_PREG_RBF_SET(SPI, SPI_CTRLR0, WAIT_CYCLES, config->dummy_cycles, &spi->spi_ctrl0);
#endif

	return SEDI_DRIVER_OK;
}
1319
/**
 * Start an interrupt-driven enhanced-mode (dual/quad/octal) SPI transfer.
 *
 * @param spi_device SPI instance, must be < SEDI_SPI_NUM
 * @param config     Enhanced transfer description (buffers, lengths, phases)
 *
 * @return SEDI_DRIVER_OK on success or a SEDI_DRIVER_ERROR_* code
 */
int32_t sedi_spi_enhanced_transfer(IN sedi_spi_t spi_device,
				   IN sedi_spi_enhanced_config_t *config)
{
	int32_t ret;

	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	/* Handle enhanced spi config; abort instead of silently starting a
	 * transfer with an invalid configuration (return was ignored before)
	 */
	ret = spi_enhanced_config(spi_device, config);
	if (ret != 0) {
		return ret;
	}

	/* start transfer */
	return sedi_spi_transfer(spi_device, config->tx_buf, config->rx_buf,
				 config->data_len);
}
1332
/**
 * Start a DMA-based enhanced-mode (dual/quad/octal) SPI transfer.
 *
 * @param spi_device  SPI instance, must be < SEDI_SPI_NUM
 * @param tx_dma      DMA controller used for transmit
 * @param tx_dma_chan DMA channel used for transmit
 * @param rx_dma      DMA controller used for receive
 * @param rx_dma_chan DMA channel used for receive
 * @param config      Enhanced transfer description (buffers, lengths, phases)
 *
 * @return SEDI_DRIVER_OK on success or a SEDI_DRIVER_ERROR_* code
 */
int32_t sedi_spi_dma_enhanced_transfer(IN sedi_spi_t spi_device, IN uint32_t tx_dma,
				       IN uint32_t tx_dma_chan, IN uint32_t rx_dma,
				       IN uint32_t rx_dma_chan,
				       IN sedi_spi_enhanced_config_t *config)
{
	int32_t ret;

	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	/* Handle enhanced spi config; abort instead of silently starting a
	 * transfer with an invalid configuration (return was ignored before)
	 */
	ret = spi_enhanced_config(spi_device, config);
	if (ret != 0) {
		return ret;
	}

	/* start transfer */
	return sedi_spi_dma_transfer(spi_device, tx_dma, tx_dma_chan,
				     config->tx_buf, rx_dma, rx_dma_chan,
				     config->rx_buf, config->data_len);
}
1348
/**
 * Supply the next transmit buffer for an ongoing continuous-CS transfer.
 *
 * @param spi_device SPI instance, must be < SEDI_SPI_NUM
 * @param tx_buf     New TX buffer to continue the transfer from
 * @param len        Bytes to add; must be a non-zero multiple of the frame size
 *
 * @return SEDI_DRIVER_OK, SEDI_DRIVER_ERROR_PARAMETER on bad input, or
 *         SEDI_DRIVER_ERROR_UNSUPPORTED outside continuous mode
 */
int32_t sedi_spi_update_tx_buf(IN sedi_spi_t spi_device, IN uint8_t *tx_buf,
			       IN uint32_t len)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	struct spi_context *ctx = &spi_contexts[spi_device];

	/* Length must be a whole number of frames */
	DBG_CHECK(((len % ctx->frame_size) == 0), SEDI_DRIVER_ERROR_PARAMETER);

	/* Only meaningful while chip-select stays asserted between buffers */
	DBG_CHECK((ctx->is_cs_continuous == true), SEDI_DRIVER_ERROR_UNSUPPORTED);

	if (len == 0) {
		return SEDI_DRIVER_ERROR_PARAMETER;
	}

	/* Point the in-flight transfer at the new buffer and grow the total
	 * transmit length by the added amount
	 */
	ctx->data_tx = (void *)tx_buf;
	ctx->tx_data_len += len;

	return SEDI_DRIVER_OK;
}
1373
/**
 * Supply the next receive buffer for an ongoing continuous-CS transfer.
 *
 * @param spi_device SPI instance, must be < SEDI_SPI_NUM
 * @param rx_buf     New RX buffer to continue the transfer into
 * @param len        Bytes to add; must be a non-zero multiple of the frame size
 *
 * @return SEDI_DRIVER_OK, SEDI_DRIVER_ERROR_PARAMETER on bad input, or
 *         SEDI_DRIVER_ERROR_UNSUPPORTED outside continuous mode
 */
int32_t sedi_spi_update_rx_buf(IN sedi_spi_t spi_device, OUT uint8_t *rx_buf,
			       IN uint32_t len)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	struct spi_context *ctx = &spi_contexts[spi_device];

	/* Length must be a whole number of frames */
	DBG_CHECK(((len % ctx->frame_size) == 0), SEDI_DRIVER_ERROR_PARAMETER);

	/* Only meaningful while chip-select stays asserted between buffers */
	DBG_CHECK((ctx->is_cs_continuous == true), SEDI_DRIVER_ERROR_UNSUPPORTED);

	if (len == 0) {
		return SEDI_DRIVER_ERROR_PARAMETER;
	}

	/* Point the in-flight transfer at the new buffer and grow the total
	 * receive length by the added amount (continuous mode always runs in
	 * TX-and-RX mode, so RX tracks the transfer too)
	 */
	ctx->data_rx = (void *)rx_buf;
	ctx->rx_data_len += len;

	return SEDI_DRIVER_OK;
}
1399
/**
 * SPI interrupt service routine for IRQ-driven transfers.
 *
 * Drains/fills the FIFOs as far as possible per interrupt, fires
 * TX_FINISHED/RX_FINISHED callbacks as each direction completes, and on
 * full completion (or error) disables interrupts, clears the busy flag and
 * reports COMPLETE or DATA_LOST to the user callback.
 *
 * Fix: the completion test previously crossed the counters
 * (data_rx_idx vs tx_data_len and data_tx_idx vs rx_data_len). That only
 * worked because both lengths start equal; in continuous mode
 * sedi_spi_update_tx_buf()/sedi_spi_update_rx_buf() can extend them
 * independently, so each index must be compared against its own length.
 *
 * @param spi_device SPI instance whose interrupt fired
 */
void spi_isr(IN sedi_spi_t spi_device)
{
	struct spi_context *context = &spi_contexts[spi_device];
	uint32_t intr_stat;
	int error = false;
	int end = false;
	int event;
	int idx;
	uint32_t temp, rx_len;

	intr_stat = lld_spi_interrupt_clear(context->base);

	/* To reduce the interrupt times, send/receive as many as possible */
	if (intr_stat & (REG_INT_RX | REG_INT_TX)) {
		while (context->data_tx_idx < context->tx_data_len) {
			temp = context->tx_data_len - context->data_tx_idx;
			idx = lld_spi_fill_fifo(spi_device, context->frame_size,
						context->data_tx, temp);

			context->data_tx_idx += idx;
			if (context->data_tx != NULL) {
				context->data_tx += idx;
			}

			if ((context->data_tx_idx == context->tx_data_len) &&
			    (context->cb_event)) {
				context->cb_event(SEDI_SPI_EVENT_TX_FINISHED,
						  context->cb_param);
			}

			if (idx < temp) {
				/* If last transfer filled FIFO full, break */
				break;
			}
		}

		while (context->data_rx_idx < context->rx_data_len) {
			rx_len = context->rx_data_len - context->data_rx_idx;
			idx = lld_spi_receive_fifo(context->base,
						   context->frame_size,
						   context->data_rx, rx_len);

			context->data_rx_idx += idx;
			if (context->data_rx != NULL) {
				context->data_rx += idx;
			}

			/*Check if need to modify watermark for last transfer*/
			if ((context->rx_data_len - context->data_rx_idx <
			     context->frame_size * context->rx_watermark) &&
			    (context->rx_data_len != context->data_rx_idx)) {
				temp = (context->rx_data_len -
					context->data_rx_idx) /
				       context->frame_size;
				lld_spi_set_rx_watermark(context->base, temp);
				context->rx_watermark = temp;
			}

			if ((context->data_rx_idx == context->rx_data_len) &&
			    (context->cb_event)) {
				context->cb_event(SEDI_SPI_EVENT_RX_FINISHED,
						  context->cb_param);
			}

			if (idx < rx_len) {
				/* If last transfer received all data in FIFO,
				 * break */
				break;
			}
		}
	}

	/* Compare each direction's progress against its own total length
	 * (see function header: the original crossed tx/rx here)
	 */
	if ((context->data_rx_idx == context->rx_data_len) &&
	    (context->data_tx_idx == context->tx_data_len)) {
		end = true;
		event = SEDI_SPI_EVENT_COMPLETE;
		/* Wait for Data in FIFO send out while not continuous */
		while (lld_spi_is_busy(context->base))
			;

		/* If need to reverse rx buffer */
		if ((context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, TX_ONLY)) &&
		    (context->is_lsb == true)) {
			context->data_rx -= context->data_rx_idx;
			spi_bit_reverse(context->data_rx, context->rx_data_len,
					context->frame_size);
		}
		/* If need to recover tx buffer */
		if ((context->transfer_mode != SEDI_RBFV(SPI, CTRLR0, TMOD, RX_ONLY)) &&
		    (context->is_lsb == true)) {
			context->data_tx -= context->data_tx_idx;
			spi_bit_reverse(context->data_tx, context->tx_data_len,
					context->frame_size);
		}
	}

	if (intr_stat & REG_INT_ERROR) {
		error = true;
		event = SEDI_SPI_EVENT_DATA_LOST;
		context->status.data_lost = true;
	}

	if ((error || end) && (context->status.busy != 0)) {
		context->status.busy = 0;
		lld_spi_config_interrupt(context->base, REG_INT_NONE);
		lld_spi_enable(context->base, false);

		if (context->cb_event) {
			context->cb_event(event, context->cb_param);
		}
	}
}
1512