1 /*
2 * Copyright (c) 2023 Intel Corporation
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
#include <stddef.h>

#include "sedi_spi_dw_apb.h"
#include "sedi_driver_pm.h"
#include "sedi_driver_core.h"
10
11 static const sedi_driver_version_t driver_version = {SEDI_SPI_API_VERSION,
12 SEDI_SPI_DRV_VERSION};
13
14 static sedi_spi_capabilities_t driver_capabilities[SEDI_SPI_NUM] = {0};
15
16 static struct spi_context spi_contexts[SEDI_SPI_NUM];
17
/*
 * 256-entry lookup table: bit_reverse_table[b] is byte b with its bit
 * order reversed (MSB <-> LSB). Used to emulate LSB-first transfers on
 * a controller that only shifts MSB-first.
 */
static const uint8_t bit_reverse_table[] = {
	0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0,
	0x30, 0xB0, 0x70, 0xF0, 0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8,
	0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8, 0x04, 0x84, 0x44, 0xC4,
	0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
	0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC,
	0x3C, 0xBC, 0x7C, 0xFC, 0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2,
	0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2, 0x0A, 0x8A, 0x4A, 0xCA,
	0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
	0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6,
	0x36, 0xB6, 0x76, 0xF6, 0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE,
	0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE, 0x01, 0x81, 0x41, 0xC1,
	0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
	0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9,
	0x39, 0xB9, 0x79, 0xF9, 0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5,
	0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5, 0x0D, 0x8D, 0x4D, 0xCD,
	0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
	0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3,
	0x33, 0xB3, 0x73, 0xF3, 0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB,
	0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB, 0x07, 0x87, 0x47, 0xC7,
	0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
	0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF,
	0x3F, 0xBF, 0x7F, 0xFF
};

/* Reverse the bit order of each byte in val[0..len-1], in place. */
static void msb_lsb_convert_8bits(uint8_t *val, uint32_t len)
{
	for (uint32_t i = 0; i < len; i++) {
		val[i] = bit_reverse_table[val[i]];
	}
}

/*
 * Reverse the bit order of each 16-bit word in val[0..len-1], in place.
 * len counts 16-bit elements. Equivalent to a full 16-bit reversal:
 * the bytes swap places and each byte is bit-reversed.
 */
static void msb_lsb_convert_16bits(uint16_t *val, uint32_t len)
{
	for (uint32_t i = 0; i < len; i++) {
		uint16_t word = val[i];

		val[i] = (uint16_t)((bit_reverse_table[word & 0xFF] << 8U) |
				    bit_reverse_table[(word & 0xFF00) >> 8U]);
	}
}
65
/*
 * Enable or disable the SSI block, polling until the hardware reflects
 * the requested state. Interrupts are masked first so no stale pending
 * interrupt fires around the state change.
 */
static inline void lld_spi_enable(spi_reg_t *spi, bool enable)
{
	uint32_t target = enable ? 1 : 0;

	/* Already in the requested state: nothing to do */
	if (spi->ssienr == target) {
		return;
	}

	/* prevent pending interrupt */
	spi->imr = 0;

	spi->ssienr = target;
	/* Spin until the enable bit takes effect */
	while (spi->ssienr != target) {
	}
}
80
/* Turn the controller's DMA interface on or off via DMACR. */
static inline void lld_spi_dma_enable(spi_reg_t *spi, bool enable)
{
	if (enable) {
		spi->dmacr = REG_DMA_ENABLE;
	} else {
		spi->dmacr = 0;
	}
}
85
/* Program the interrupt mask register; REG_INT_NONE disables all
 * SPI interrupts (see abort/completion paths below).
 */
static inline void lld_spi_config_interrupt(spi_reg_t *spi, uint32_t mask)
{
	spi->imr = mask;
}
90
/*
 * True while the controller is actively shifting or the TX FIFO still
 * holds data. Reads SR twice (short-circuit), exactly as before.
 */
static inline bool lld_spi_is_busy(spi_reg_t *spi)
{
	if (spi->sr & REG_SR_BUSY) {
		return true;
	}
	return !(spi->sr & REG_SR_TX_EMPTY);
}
97
/* True when the SSI enable register is set. */
static inline bool lld_spi_is_enabled(spi_reg_t *spi)
{
	return spi->ssienr != 0;
}
102
/*
 * Snapshot the interrupt status register, then clear all interrupt
 * sources. The clear registers are clear-on-read, so each read below
 * is a side effect; the stored value itself is discarded.
 *
 * Returns the ISR value sampled before clearing.
 */
static inline uint32_t lld_spi_interrupt_clear(spi_reg_t *spi)
{
	uint32_t discard;
	uint32_t pending;

	PARAM_UNUSED(discard);

	pending = spi->isr;
	discard = spi->icr;    /* combined interrupt clear */

	/* Clear all error interrupt by read */
	discard = spi->txoicr; /* TX overflow */
	discard = spi->rxoicr; /* RX overflow */
	discard = spi->rxuicr; /* RX underflow */

	return pending;
}
120
/*
 * Apply the driver's power-on defaults: half-depth FIFO watermarks,
 * 8-bit frames, default prescaler, loopback off, CS0 selected.
 * Mirrors the same defaults into the software context.
 *
 * Always returns 0.
 */
static int lld_spi_default_config(sedi_spi_t spi_device)
{
	struct spi_context *context = &spi_contexts[spi_device];
	spi_reg_t *spi = context->base;
	uint32_t watermark = SPI_FIFO_DEPTH / 2 - 1;
	uint8_t width = SPI_BITWIDTH_8BITS;
	uint8_t loopback = false;
	uint8_t cs_mask = 0x1;
	uint32_t ctrl0;

	/* Hardware encodes the frame size as (width - 1) */
	ctrl0 = width - 1;
	if (loopback) {
		ctrl0 |= REG_CTRL0_LOOPBACK;
	}
	ctrl0 |= (width - 1) << OFFSET_CTRL0_WIDTH;

	/* The controller must be disabled while reconfiguring */
	lld_spi_enable(spi, false);

	/* Half-depth watermarks for both interrupt and DMA paths */
	spi->txftlr = watermark;
	spi->rxftlr = watermark;
	spi->dmatdlr = watermark;
	spi->dmardlr = watermark;

	spi->ctrl0 = ctrl0;
	spi->baudr = DEFAULT_PRESCALE;
	spi->ser = cs_mask;

	/* Keep the context in sync with the hardware defaults */
	context->tx_watermark = watermark + 1U;
	context->rx_watermark = watermark + 1U;
	context->prescale = DEFAULT_PRESCALE;
	context->frame_size = 1U;
	context->is_lsb = false;

	return 0;
}
160
/*
 * Program clock polarity (cpol) and phase (cpha) in CTRL0.
 * NOTE(review): three read-modify-write accesses on a live register;
 * callers appear to reconfigure only while disabled -- confirm.
 */
static inline void lld_spi_config_cpol_cpha(spi_reg_t *spi, int cpol, int cpha)
{
	/* Clear both mode bits, then set the requested combination */
	spi->ctrl0 &= ~(REG_CTRL0_CPHA | REG_CTRL0_CPOL);
	spi->ctrl0 |= cpha ? REG_CTRL0_CPHA : 0;
	spi->ctrl0 |= cpol ? REG_CTRL0_CPOL : 0;
}
167
/* Enable or disable internal loopback in CTRL0 (nonzero enables). */
static inline void lld_spi_config_loopback(spi_reg_t *spi, int loopback)
{
	spi->ctrl0 &= ~REG_CTRL0_LOOPBACK;
	spi->ctrl0 |= loopback ? REG_CTRL0_LOOPBACK : 0;
}
173
/* Program the baud-rate divider; bus clock = SSI_IC_FREQ / prescale
 * (see SEDI_SPI_IOCTL_SPEED_SET below).
 */
static inline void lld_spi_config_prescale(spi_reg_t *spi, uint32_t prescale)
{
	spi->baudr = prescale;
}
178
/* Set the data frame size field in CTRL0; hardware encodes (width - 1). */
static inline void lld_spi_config_width(spi_reg_t *spi, uint8_t width)
{
	spi->ctrl0 &= ~MASK_CTRL0_WIDTH;
	spi->ctrl0 |= (width - 1) << OFFSET_CTRL0_WIDTH;
}
184
/* Program the TX FIFO threshold; the register is zero-based. */
static inline void lld_spi_set_tx_watermark(spi_reg_t *spi, uint32_t watermark)
{
	uint32_t threshold = watermark - 1;

	spi->txftlr = threshold;
}
189
/* Program the RX FIFO threshold; the register is zero-based. */
static inline void lld_spi_set_rx_watermark(spi_reg_t *spi, uint32_t watermark)
{
	uint32_t threshold = watermark - 1;

	spi->rxftlr = threshold;
}
194
/* Select slave(s): one bit per chip-select line; 0 deselects all
 * (see SEDI_SPI_IOCTL_CS_UNUSED).
 */
static inline void lld_spi_config_cs(spi_reg_t *spi, uint32_t cs_mask)
{
	spi->ser = cs_mask;
}
199
/*
 * Select the transfer direction (TMOD) from the supplied buffers:
 * no TX buffer -> receive-only, no RX buffer -> transmit-only,
 * both -> full duplex. Records the choice in the context as well.
 */
static void lld_spi_set_transfer_mode(sedi_spi_t spi_device,
				      IN uint8_t *data_out,
				      OUT uint8_t *data_in)
{
	struct spi_context *context = &spi_contexts[spi_device];
	spi_reg_t *spi = context->base;
	uint32_t mode = spi->ctrl0 & ~SPI_CTRL0_TMOD_MASK;

	if (data_out == NULL) {
		/* Receive-only mode */
		mode |= SPI_CTRL0_RECEIVE_MODE;
		context->transfer_mode = SPI_TRANSFER_MODE_RECEIVE;
	} else if (data_in == NULL) {
		/* Transmit-only mode */
		mode |= SPI_CTRL0_SEND_MODE;
		context->transfer_mode = SPI_TRANSFER_MODE_SEND;
	} else {
		/* Full-duplex mode */
		mode |= SPI_CTRL0_BOTH_MODE;
		context->transfer_mode = SPI_TRANSFER_MODE_BOTH;
	}

	spi->ctrl0 = mode;
}
224
/*
 * Push up to @count bytes into the TX FIFO.
 *
 * @param spi_device  index into spi_contexts
 * @param frame_size  bytes per frame (1 or 2)
 * @param buff        source data, or NULL to send context->dummy_data
 * @param count       requested byte count (multiple of frame_size)
 * @return number of bytes actually written to the FIFO
 *
 * When a quad/enhanced transfer is pending, the opcode and address are
 * written first, then the quad state is dropped so the payload moves as
 * standard SPI.
 */
static int lld_spi_fill_fifo(sedi_spi_t spi_device, uint8_t frame_size,
			     IN uint8_t *buff, uint32_t count)
{
	struct spi_context *context = &spi_contexts[spi_device];
	spi_reg_t *spi = context->base;
	uint32_t size = 0;
	uint32_t data = 0;

	/* In quad mode, need to send opcode and addr first */
	if ((context->is_quad) && (context->quad_config)) {
		const sedi_spi_enhanced_config_t *config = context->quad_config;

		/* Instruction need 1 entry */
		spi->dr = *(config->inst_buf);
		/* NOTE(review): assumes addr_buf is 4-byte aligned and at
		 * least 4 bytes long -- confirm against callers. */
		spi->dr = *((uint32_t *)(config->addr_buf));
		/* After fill in addr and instruction, no need to keep quad state,
		   just transfer data as standard SPI */
		context->is_quad = false;
		context->quad_config = NULL;
		/* Receive-only transfers have no payload to fill */
		if (context->transfer_mode == SPI_TRANSFER_MODE_RECEIVE) {
			return 0;
		}
	}

	/* Check how many empty entry in FIFO (converted to bytes) */
	size = (SPI_FIFO_DEPTH - spi->txflr) * frame_size;

	/* Get the number which can be filled to fifo */
	size = (count > size) ? size : count;
	/* Used to return the actual fill size in bytes */
	count = size;
	while (size) {
		/* Get the data in a FIFO entry */
		if (buff) {
			switch (frame_size) {
			case SPI_FRAME_SIZE_1_BYTE:
				data = (uint32_t)(*buff);
				break;
			case SPI_FRAME_SIZE_2_BYTES:
				data = (uint32_t)(*(uint16_t *)buff);
				break;
			default:
				break;
			}
			/* Update size */
			buff += frame_size;
		} else {
			/* No source buffer: clock out dummy data */
			data = context->dummy_data;
		}
		/* Write data */
		spi->dr = data;
		size -= frame_size;
	}

	return count;
}
281
/*
 * Drain up to @count bytes from the RX FIFO into @buff.
 * With a NULL buff the entries are popped and discarded.
 * Returns the number of bytes actually read.
 */
static int lld_spi_receive_fifo(spi_reg_t *spi, uint8_t frame_size,
				OUT uint8_t *buff, uint32_t count)
{
	uint32_t remaining = spi->rxflr * frame_size;
	uint32_t entry;

	/* Never drain more than the caller asked for */
	if (remaining > count) {
		remaining = count;
	}
	/* Remember the total so it can be returned in bytes */
	count = remaining;

	while (remaining) {
		/* Pop one FIFO entry */
		entry = spi->dr;
		if (buff) {
			if (frame_size == SPI_FRAME_SIZE_1_BYTE) {
				*buff = (entry & 0xFF);
			} else if (frame_size == SPI_FRAME_SIZE_2_BYTES) {
				buff[0] = (entry & 0xFF);
				buff[1] = ((entry >> 8U) & 0xFF);
			}
			buff += frame_size;
		}
		remaining -= frame_size;
	}

	return count;
}
315
/*
 * Return the data-register address handed to the DMA engine (or, in
 * the non-DMA build, the CPU-visible DR address).
 *
 * NOTE(review): the DMA path always uses the SPI_0 base address
 * regardless of @spi -- confirm for multi-instance configurations.
 */
static inline uint32_t lld_spi_dr_address(spi_reg_t *spi)
{
#ifdef SEDI_SPI_USE_DMA
	/* offsetof() replaces the former null-pointer-deref idiom
	 * ((spi_reg_t *)0)->dr, which is undefined behavior in C. */
	uint32_t ret = SEDI_SPI_0_REG_DMA + (uint32_t)offsetof(spi_reg_t, dr);
	return ret;
#else
	return (uint32_t)&spi->dr;
#endif
}
325
spi_bit_reverse(uint8_t * val,uint32_t len,uint8_t frame_size)326 static void spi_bit_reverse(uint8_t *val, uint32_t len, uint8_t frame_size)
327 {
328 if (frame_size == SPI_FRAME_SIZE_1_BYTE) {
329 msb_lsb_convert_8bits(val, len);
330 } else {
331 msb_lsb_convert_16bits((uint16_t *)val, len / frame_size);
332 }
333 }
334
/* Switch the frame format to TI SSP. The controller is disabled first
 * because the FRF field only takes effect while SSI is disabled.
 */
static void lld_spi_set_ti_mode(spi_reg_t *spi)
{
	if (lld_spi_is_enabled(spi) == true) {
		lld_spi_enable(spi, false);
	}
	spi->ctrl0 &= ~REG_CTRL0_FRF_MASK;
	spi->ctrl0 |= REG_CTRL0_FRF_TI_SSP;
}
343
/*
 * Switch the frame format to Microwire and program the Microwire
 * control register (handshake, direction, sequential mode) from
 * @config. Disables the controller first, as FRF changes require.
 */
static void lld_spi_set_microwire_mode(spi_reg_t *spi,
				       sedi_spi_microwire_config_t *config)
{
	uint32_t mwcr = 0;

	if (lld_spi_is_enabled(spi) == true) {
		lld_spi_enable(spi, false);
	}
	spi->ctrl0 &= ~REG_CTRL0_FRF_MASK;
	spi->ctrl0 |= REG_CTRL0_FRF_MICROWIRE;

	/* Build the Microwire control word, then write it once */
	mwcr |= config->microwire_handshake << REG_MWCR_MHS_SHIFT;
	mwcr |= config->data_direction_tx << REG_MWCR_MDD_SHIFT;
	mwcr |= config->sequential_mode << REG_MWCR_MWMOD_SHIFT;

	spi->mwcr = mwcr;
}
362
/* Select the SPI line mode (single/dual/quad) in CTRL0; the controller
 * is disabled first so the field change takes effect.
 */
static void lld_spi_set_line_mode(spi_reg_t *spi, spi_line_mode_t mode)
{
	uint32_t ctrl0;

	lld_spi_enable(spi, false);

	ctrl0 = spi->ctrl0;
	ctrl0 = (ctrl0 & ~SPI_FRAME_FORMAT_MASK) |
		(mode << SPI_FRAME_FORMAT_SHIFT);
	spi->ctrl0 = ctrl0;
}
374
375 #ifdef SPI_DW_2_0
dw_spi_set_start_condition(struct spi_context * context,uint32_t num)376 static void dw_spi_set_start_condition(struct spi_context *context, uint32_t num)
377 {
378 spi_reg_t *spi = context->base;
379 uint32_t start_frame = 0;
380
381 /* Set the send start condition to improve efficiency */
382 if (context->quad_config) {
383 /* enhanced mode includes 2 frames for opcode and addr */
384 start_frame = num / (context->frame_size) + 2;
385 } else {
386 start_frame = num / (context->frame_size);
387 }
388 /* Clear the bit field */
389 spi->txftlr &= ~SPI_TXFTLR_TXFTHR_MASK;
390 /* Compare with FIFO depth */
391 if (start_frame < SPI_FIFO_DEPTH) {
392 spi->txftlr |= ((start_frame - 1) << SPI_TXFTLR_TXFTHR_SHIFT);
393 } else {
394 spi->txftlr |= ((SPI_FIFO_DEPTH - 1) << SPI_TXFTLR_TXFTHR_SHIFT);
395 }
396 }
397
398 #endif
399 /******************************************************************************
400 * SEDI interface
401 *****************************************************************************/
402
/* Return the driver's API/implementation version pair. */
sedi_driver_version_t sedi_spi_get_version(void)
{
	return driver_version;
}
407
/*
 * Fill @cap with the capabilities of @spi_device. Availability reflects
 * whether this core currently owns the device; TI SSP and Microwire are
 * always reported as supported, mode-fault detection is not.
 */
int32_t sedi_spi_get_capabilities(IN sedi_spi_t spi_device,
				  sedi_spi_capabilities_t *cap)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	sedi_spi_capabilities_t *caps = &driver_capabilities[spi_device];

	/* Availability depends on current device ownership */
	caps->is_available =
		sedi_dev_is_self_owned(SEDI_DEVID_SPI0 + spi_device) ? 1 : 0;

	caps->ti_ssi = 1;
	caps->microwire = 1;
	caps->mode_fault = 0;

	*cap = *caps;

	return SEDI_DRIVER_OK;
}
427
/*
 * Bind a device context to its register base and event callback.
 *
 * @return SEDI_DRIVER_OK, SEDI_DRIVER_ERROR_PARAMETER on a bad index,
 *         or SEDI_DRIVER_ERROR_NO_DEV when the device is not owned by
 *         this core.
 */
int32_t sedi_spi_init(IN sedi_spi_t spi_device, IN sedi_spi_event_cb_t cb_event,
		      INOUT void *param, IN uint32_t base)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	struct spi_context *context = &spi_contexts[spi_device];

	/* Reject devices this core does not own */
	if (!sedi_dev_is_self_owned(SEDI_DEVID_SPI0 + spi_device)) {
		return SEDI_DRIVER_ERROR_NO_DEV;
	}

	context->cb_event = cb_event;
	context->cb_param = param;
	context->base = (spi_reg_t *)base;

	return SEDI_DRIVER_OK;
}
446
/* Detach the event callback and restore the MSB-first default. */
int32_t sedi_spi_uninit(IN sedi_spi_t spi_device)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	struct spi_context *ctx = &spi_contexts[spi_device];

	ctx->cb_event = NULL;
	ctx->is_lsb = false;

	return SEDI_DRIVER_OK;
}
458
/*
 * Change the device power state.
 * FULL powers up and restores the default configuration; the suspend
 * and low-power states quiesce the controller before dropping power.
 * SEDI_POWER_OFF (and unknown states) are rejected.
 */
int32_t sedi_spi_set_power(IN sedi_spi_t spi_device,
			   IN sedi_power_state_t state)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	sedi_devid_t id = SEDI_DEVID_SPI0 + spi_device;
	struct spi_context *context = &spi_contexts[spi_device];
	int32_t result = SEDI_DRIVER_OK;

	switch (state) {
	case SEDI_POWER_FULL:
		/* Power up first, then reapply the driver defaults */
		sedi_pm_set_device_power(id, state);
		lld_spi_default_config(spi_device);
		break;
	case SEDI_POWER_SUSPEND:
	case SEDI_POWER_FORCE_SUSPEND:
	case SEDI_POWER_LOW:
		/* Quiesce the controller before lowering power */
		lld_spi_enable(context->base, false);
		sedi_pm_set_device_power(id, state);
		break;
	case SEDI_POWER_OFF:
	default:
		result = SEDI_DRIVER_ERROR_UNSUPPORTED;
		break;
	}

	return result;
}
486
/* Return the number of bytes transferred so far: the TX progress
 * counter when a TX buffer is active, otherwise the RX counter.
 */
int32_t sedi_spi_get_data_count(IN sedi_spi_t spi_device)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	struct spi_context *ctx = &spi_contexts[spi_device];

	return ctx->data_tx ? ctx->data_tx_idx : ctx->data_rx_idx;
}
499
/*
 * Snapshot the driver and hardware status into @status: the software
 * flags tracked in the context plus a live read of ISR/SR and both
 * FIFO level registers.
 */
int32_t sedi_spi_get_status(IN sedi_spi_t spi_device, sedi_spi_status_t *status)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(NULL != status, SEDI_DRIVER_ERROR_PARAMETER);

	struct spi_context *context = &spi_contexts[spi_device];
	spi_reg_t *reg = context->base;

	/* Software-tracked state */
	status->busy = context->status.busy;
	status->data_lost = context->status.data_lost;
	status->mode_fault = context->status.mode_fault;

	/* Hardware snapshot */
	status->isr = reg->isr;
	status->sr = reg->sr;
	status->txflr = reg->txflr;
	status->rxflr = reg->rxflr;

	return SEDI_DRIVER_OK;
}
518
/*
 * IOCTL-style control entry point.
 *
 * @param spi_device  index into spi_contexts (checked)
 * @param control     one of the SEDI_SPI_IOCTL_* codes (checked)
 * @param arg         code-specific: a value, a boolean, or a pointer
 *                    cast to uint32_t (MICROWIRE config, *_GET codes)
 * @return SEDI_DRIVER_OK, or SEDI_DRIVER_ERROR_PARAMETER on bad input
 *
 * NOTE(review): unrecognized codes fall through to default and still
 * return SEDI_DRIVER_OK.
 */
int32_t sedi_spi_control(IN sedi_spi_t spi_device, IN uint32_t control,
			 IN uint32_t arg)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
	DBG_CHECK(control < SEDI_SPI_IOCTL_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	struct spi_context *context = &spi_contexts[spi_device];

	switch (control) {
	/* Clock polarity / phase combinations */
	case SEDI_SPI_IOCTL_CPOL0_CPHA0:
		lld_spi_config_cpol_cpha(context->base, 0, 0);
		break;
	case SEDI_SPI_IOCTL_CPOL0_CPHA1:
		lld_spi_config_cpol_cpha(context->base, 0, 1);
		break;
	case SEDI_SPI_IOCTL_CPOL1_CPHA0:
		lld_spi_config_cpol_cpha(context->base, 1, 0);
		break;
	case SEDI_SPI_IOCTL_CPOL1_CPHA1:
		lld_spi_config_cpol_cpha(context->base, 1, 1);
		break;
	/* Frame formats */
	case SEDI_SPI_IOCTL_TI_SSI:
		lld_spi_set_ti_mode(context->base);
		break;
	case SEDI_SPI_IOCTL_MICROWIRE:
		/* arg is a pointer to the Microwire configuration */
		lld_spi_set_microwire_mode(context->base,
					   (sedi_spi_microwire_config_t *)arg);
		break;
	/* Bit order */
	case SEDI_SPI_IOCTL_MSB:
		context->is_lsb = false;
		break;
	case SEDI_SPI_IOCTL_LSB:
		context->is_lsb = true;
		break;
	case SEDI_SPI_IOCTL_DATA_WIDTH:
		DBG_CHECK(((arg == SPI_BITWIDTH_4BITS) ||
			   (arg == SPI_BITWIDTH_8BITS) ||
			   (arg == SPI_BITWIDTH_16BITS)),
			  SEDI_DRIVER_ERROR_PARAMETER);
		context->frame_size = (uint8_t)arg / 8U;
		/* For 4 bits operation, operate like 8 bits */
		if (context->frame_size == 0) {
			context->frame_size = SPI_FRAME_SIZE_1_BYTE;
		}
		lld_spi_config_width(context->base, (uint8_t)arg);
		break;
	/* Chip-select handling */
	case SEDI_SPI_IOCTL_CS_UNUSED:
		lld_spi_config_cs(context->base, 0U);
		break;
	case SEDI_SPI_IOCTL_CS_HW:
		lld_spi_config_cs(context->base, (uint32_t)arg);
		break;
	case SEDI_SPI_IOCTL_SPEED_SET:
		/* NOTE(review): arg == 0 would divide by zero -- callers
		 * must request a nonzero speed. */
		context->prescale = SSI_IC_FREQ / (uint32_t)arg;
		if (context->prescale < SSI_PRESCALE_MIN) {
			context->prescale = SSI_PRESCALE_MIN;
		}
		lld_spi_config_prescale(context->base, context->prescale);
		break;
	case SEDI_SPI_IOCTL_TX_WATERMARK_SET:
		context->tx_watermark = (uint32_t)arg;
		lld_spi_set_tx_watermark(context->base, (uint32_t)arg);
		break;
	case SEDI_SPI_IOCTL_RX_WATERMARK_SET:
		context->rx_watermark = (uint32_t)arg;
		lld_spi_set_rx_watermark(context->base, (uint32_t)arg);
		break;
	case SEDI_SPI_IOCTL_DUMMY_DATA:
		/* Value clocked out when TX has no buffer */
		context->dummy_data = (uint32_t)arg;
		break;
	case SEDI_SPI_IOCTL_LOOPBACK:
		lld_spi_config_loopback(context->base, (bool)arg);
		break;
	/* GET codes: arg is an output pointer cast to uint32_t */
	case SEDI_SPI_IOCTL_SPEED_GET:
		*((uint32_t *)arg) = SSI_IC_FREQ / context->prescale;
		break;
	case SEDI_SPI_IOCTL_TX_WATERMARK_GET:
		*((uint32_t *)arg) = context->tx_watermark;
		break;
	case SEDI_SPI_IOCTL_RX_WATERMARK_GET:
		*((uint32_t *)arg) = context->rx_watermark;
		break;
	case SEDI_SPI_IOCTL_ABORT:
		/* Stop the controller, mask interrupts, drop busy state */
		lld_spi_enable(context->base, false);
		lld_spi_config_interrupt(context->base, REG_INT_NONE);
#ifdef SEDI_SPI_USE_DMA
		lld_spi_dma_enable(context->base, false);
#endif
		context->status.busy = 0;
		break;
	case SEDI_SPI_IOCTL_BUFFER_SETS:
		context->is_cs_continuous = (bool)arg;
		break;
	case SEDI_SPI_IOCTL_LINE_MODE:
		lld_spi_set_line_mode(context->base, (spi_line_mode_t)arg);
		break;
	default:
		break;
	}

	return SEDI_DRIVER_OK;
}
621
622 #ifdef SEDI_SPI_USE_DMA
623 static void callback_dma_transfer(const sedi_dma_t dma, const int chan,
624 const int event, void *param);
625
/*
 * Initialize and fully configure one DMA channel for an SPI transfer
 * leg (TX or RX): init with the shared callback, power up, then set
 * burst length, source/destination widths, handshake id, polarity,
 * direction and per-direction handshake.
 *
 * @param spi_dev    SPI instance, passed through to the DMA callback
 * @param dma/chan   DMA controller and channel to configure
 * @param handshake  hardware handshake id for this peripheral
 * @param width      frame size in bytes (1/2/4 -> 8/16/32-bit width)
 * @param is_tx      nonzero for memory->peripheral, else the reverse
 * @return 0 on success, SEDI_DRIVER_ERROR via DBG_CHECK on failure
 *
 * NOTE(review): src, dst and len are not used here; the caller supplies
 * them again to sedi_dma_start_transfer() -- confirm this is intended.
 */
static int config_and_enable_dma_channel(sedi_spi_t spi_dev, int dma,
					 int handshake, int chan, int width,
					 int burst, uint32_t src, uint32_t dst,
					 uint32_t len, int is_tx)
{
	int ret;
	int dma_dir;
	int dma_per_dir;
	dma_transfer_width_t wid = DMA_TRANS_WIDTH_8;

	PARAM_UNUSED(
		burst); /* Set burst to 1 to finish transfer all data size */

	if (is_tx) {
		dma_dir = DMA_MEMORY_TO_PERIPHERAL;
		dma_per_dir = DMA_HS_PER_TX;
	} else {
		dma_dir = DMA_PERIPHERAL_TO_MEMORY;
		dma_per_dir = DMA_HS_PER_RX;
	}

	/* Map byte width to DMA width; unknown values keep 8-bit */
	switch (width) {
	case 1:
		wid = DMA_TRANS_WIDTH_8;
		break;
	case 2:
		wid = DMA_TRANS_WIDTH_16;
		break;
	case 4:
		wid = DMA_TRANS_WIDTH_32;
		break;
	default:
		break;
	}

	ret = sedi_dma_init(dma, chan, callback_dma_transfer, (void *)spi_dev);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_set_power(dma, chan, SEDI_POWER_FULL);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_BURST_LENGTH,
			       DMA_BURST_TRANS_LENGTH_1);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_SR_TRANS_WIDTH, wid);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_DT_TRANS_WIDTH, wid);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_HS_DEVICE_ID,
			       handshake);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_HS_POLARITY,
			       DMA_HS_POLARITY_HIGH);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_DIRECTION, dma_dir);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	ret = sedi_dma_control(dma, chan, SEDI_CONFIG_DMA_HS_DEVICE_ID_PER_DIR,
			       dma_per_dir);
	DBG_CHECK(0 == ret, SEDI_DRIVER_ERROR);

	return 0;
}
695
callback_dma_transfer(const sedi_dma_t dma,const int chan,const int event,void * param)696 static void callback_dma_transfer(const sedi_dma_t dma, const int chan,
697 const int event, void *param)
698 {
699 sedi_spi_t spi_device = (sedi_spi_t)param;
700
701 struct spi_context *context = &spi_contexts[spi_device];
702 uint32_t len = SPI_DMA_MAX_SIZE;
703
704 /* release the dma resource */
705 sedi_dma_set_power(dma, chan, SEDI_POWER_OFF);
706 sedi_dma_uninit(dma, chan);
707
708 if (event != SEDI_DMA_EVENT_TRANSFER_DONE) {
709 if (context->cb_event) {
710 context->cb_event(SEDI_SPI_EVENT_DATA_LOST,
711 context->cb_param);
712 }
713
714 goto f_out;
715 }
716
717 /* See tx or rx finished */
718 if (chan == context->tx_channel) {
719 context->dma_tx_finished = true;
720 context->data_tx_idx = context->tx_data_len;
721 /* Recover LSB reverse, DMA mode tx buff pointer not changed */
722 if (context->is_lsb == true) {
723 spi_bit_reverse(context->data_tx, context->tx_data_len,
724 context->frame_size);
725 sedi_core_clean_dcache_by_addr(
726 (uint32_t *)(context->data_tx),
727 context->tx_data_len);
728 }
729 /* Waiting for TX FIFO empty */
730 while (lld_spi_is_busy(context->base)) {
731 ;
732 }
733 } else if (chan == context->rx_channel) {
734 context->dma_rx_finished = true;
735 context->data_rx_idx = context->rx_data_len;
736 /* If finished Rx, and need to do bit convert */
737 if (context->is_lsb == true) {
738 #ifndef SEDI_CONFIG_ARCH_X86
739 /* Invalidate cache */
740 sedi_core_inv_dcache_by_addr(
741 (uint32_t *)(context->data_rx),
742 context->rx_data_len);
743 #endif
744 spi_bit_reverse(context->data_rx, context->rx_data_len,
745 context->frame_size);
746 sedi_core_clean_dcache_by_addr(
747 (uint32_t *)(context->data_rx),
748 context->rx_data_len);
749 }
750 }
751
752 if ((context->dma_tx_finished == false) ||
753 (context->dma_rx_finished == false)) {
754 return;
755 }
756
757 /* If need to start another DMA transfer */
758 context->dma_idx -= 1;
759 if (context->dma_idx > 0) {
760 if (context->dma_idx == 1) {
761 len = context->last_dma_counts;
762 }
763 /* According to different transfer mode, do different fill or receive */
764 if (context->transfer_mode == SPI_TRANSFER_MODE_SEND) {
765 context->data_tx += SPI_DMA_MAX_SIZE;
766 context->dma_tx_finished = false;
767 /* start dma first */
768 config_and_enable_dma_channel(spi_device, context->tx_dma,
769 context->dma_handshake, context->tx_channel,
770 0, 1, (uint32_t)(context->data_tx),
771 lld_spi_dr_address(context->base), len, true);
772 sedi_dma_start_transfer(context->tx_dma, context->tx_channel,
773 (uint32_t)(context->data_tx),
774 lld_spi_dr_address(context->base), len);
775
776 } else if (context->transfer_mode == SPI_TRANSFER_MODE_RECEIVE) {
777 context->data_rx += SPI_DMA_MAX_SIZE;
778 context->dma_rx_finished = false;
779 /* Configure rx channel */
780 context->base->ctrl1 = len / context->frame_size - 1;
781 sedi_dma_start_transfer(context->rx_dma, context->rx_channel,
782 lld_spi_dr_address(context->base),
783 (uint32_t)(context->data_rx), len);
784 config_and_enable_dma_channel(spi_device, context->rx_dma,
785 context->rx_handshake, context->rx_channel, 0,
786 1, lld_spi_dr_address(context->base),
787 (uint32_t)(context->data_rx), len, false);
788
789 } else {
790 context->data_tx += SPI_DMA_MAX_SIZE;
791 context->data_rx += SPI_DMA_MAX_SIZE;
792 context->dma_tx_finished = false;
793 context->dma_rx_finished = false;
794 /* Enable both channel to do transfer */
795 config_and_enable_dma_channel(spi_device, context->tx_dma,
796 context->dma_handshake, context->tx_channel,
797 0, 1, (uint32_t)(context->data_tx),
798 lld_spi_dr_address(context->base), len, true);
799 config_and_enable_dma_channel(spi_device, context->rx_dma,
800 context->rx_handshake, context->rx_channel, 0,
801 1, lld_spi_dr_address(context->base),
802 (uint32_t)(context->data_rx), len, false);
803 /* Enable both channel and start rx firstly to do transfer */
804 sedi_dma_start_transfer(context->rx_dma, context->rx_channel,
805 lld_spi_dr_address(context->base),
806 (uint32_t)(context->data_rx), len);
807 sedi_dma_start_transfer(context->tx_dma, context->tx_channel,
808 (uint32_t)(context->data_tx),
809 lld_spi_dr_address(context->base), len);
810 }
811
812 /* Return to start another transfer */
813 return;
814
815 }
816
817 /* All tx and rx finished */
818 if (context->cb_event) {
819 context->cb_event(SEDI_SPI_EVENT_COMPLETE, context->cb_param);
820 }
821
822 f_out:
823 /* clear spi busy status and disable spi dma*/
824 context->status.busy = 0;
825 lld_spi_config_interrupt(context->base, REG_INT_NONE);
826 lld_spi_enable(context->base, false);
827 lld_spi_dma_enable(context->base, false);
828 }
829 #endif
830
/*
 * Start a DMA-driven SPI transfer.
 *
 * @param spi_device            SPI instance
 * @param tx_dma/tx_dma_chan    DMA controller/channel for TX
 * @param data_out              TX buffer, or NULL for receive-only
 * @param rx_dma/rx_dma_chan    DMA controller/channel for RX
 * @param data_in               RX buffer, or NULL for transmit-only
 * @param num                   byte count (multiple of frame_size)
 * @return SEDI_DRIVER_OK, SEDI_DRIVER_ERROR_PARAMETER, or
 *         SEDI_DRIVER_ERROR_BUSY when a transfer is in flight
 *
 * Transfers larger than SPI_DMA_MAX_SIZE are split into chunks that
 * callback_dma_transfer() chains together. Returns immediately; the
 * registered callback reports completion or data-lost.
 *
 * NOTE(review): with SEDI_SPI_USE_DMA undefined this function only
 * validates the index and returns OK -- confirm callers expect that.
 */
int32_t sedi_spi_dma_transfer(IN sedi_spi_t spi_device, IN uint32_t tx_dma,
			      IN uint32_t tx_dma_chan, IN uint8_t *data_out,
			      IN uint32_t rx_dma, IN uint32_t rx_dma_chan,
			      OUT uint8_t *data_in, IN uint32_t num)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
#ifdef SEDI_SPI_USE_DMA
	struct spi_context *context = &spi_contexts[spi_device];
	spi_reg_t *spi = context->base;
	int tx_handshake = context->dma_handshake;
	int rx_handshake = context->rx_handshake;
	int width = context->frame_size;
	int burst = 1;
	uint32_t len = num;

	DBG_CHECK(((num % context->frame_size) == 0),
		  SEDI_DRIVER_ERROR_PARAMETER);

	if (context->status.busy) {
		return SEDI_DRIVER_ERROR_BUSY;
	}

	context->status.busy = 1U;

	/* DMA watermarks: TX requests while FIFO not full, RX on 1 entry */
	context->base->dmatdlr = SPI_FIFO_DEPTH - 1;
	context->base->dmardlr = 0;
	context->tx_dma = tx_dma;
	context->rx_dma = rx_dma;
	context->tx_channel = tx_dma_chan;
	context->rx_channel = rx_dma_chan;
	context->dma_tx_finished = false;
	context->dma_rx_finished = false;
	context->tx_data_len = num;
	context->rx_data_len = num;
	context->data_tx = (uint8_t *)data_out;
	context->data_rx = data_in;
	/* DMA BLOCK TS only 4096, for large data more than 4K, use multiple transfer */
	context->last_dma_counts = (num & (SPI_DMA_MAX_SIZE - 1));
	if (context->last_dma_counts == 0) {
		context->dma_cycles = num >> SPI_DMA_MAX_SIZE_SHIFT;
		context->last_dma_counts = SPI_DMA_MAX_SIZE;
	} else {
		context->dma_cycles = (num >> SPI_DMA_MAX_SIZE_SHIFT) + 1;
	}
	context->dma_idx = context->dma_cycles;

	if (context->dma_cycles > 1) {
		len = SPI_DMA_MAX_SIZE;
	}
#ifdef SPI_DW_2_0
	/* Clear the bit field */
	context->base->txftlr &= ~SPI_TXFTLR_TXFTHR_MASK;
#endif

	/* Decide the transfer mode, send, receive or both */
	lld_spi_set_transfer_mode(spi_device, data_out, data_in);

	/* If need to bit reverse tx buffer */
	if (context->is_lsb == true) {
		if (context->transfer_mode != SPI_TRANSFER_MODE_RECEIVE) {
			spi_bit_reverse(context->data_tx, context->tx_data_len,
					context->frame_size);
			/* Clean the cache for DMA transfer */
			sedi_core_clean_dcache_by_addr(
				(uint32_t *)(context->data_tx),
				context->tx_data_len);
		}
#ifdef SEDI_CONFIG_ARCH_X86
		if (context->transfer_mode != SPI_TRANSFER_MODE_SEND) {
			sedi_core_inv_clean_dcache_by_addr(
				(uint32_t *)(context->data_rx),
				context->rx_data_len);
		}
#endif
	}

	/* According to different transfer mode, do different fill or receive */
	if (context->transfer_mode == SPI_TRANSFER_MODE_SEND) {
		/* start dma first */
		config_and_enable_dma_channel(
			spi_device, tx_dma, tx_handshake, tx_dma_chan, width, burst,
			(uint32_t)data_out, lld_spi_dr_address(context->base), len,
			true);
		/* Mark RX done so the callback only waits on TX */
		context->dma_rx_finished = true;
		context->rx_channel = 0xFF;
#ifdef SPI_DW_2_0
		dw_spi_set_start_condition(context, len);
#endif
	} else if (context->transfer_mode == SPI_TRANSFER_MODE_RECEIVE) {
		/* Send dummy data first */
		if (context->is_quad == false) {
			lld_spi_fill_fifo(spi_device, context->frame_size, NULL,
					  context->frame_size);
		}

		/* Configure rx channel */
		config_and_enable_dma_channel(spi_device, rx_dma, rx_handshake,
					      rx_dma_chan, width, burst,
					      lld_spi_dr_address(context->base),
					      (uint32_t)data_in, len, false);
		/* Set NDF bits for receive only mode */
		DBG_CHECK((len <= SPI_RECEIVE_MODE_MAX_SIZE),
			  SEDI_DRIVER_ERROR_PARAMETER);
		context->base->ctrl1 = len / context->frame_size - 1;
		/* Mark TX done so the callback only waits on RX */
		context->dma_tx_finished = true;
		context->tx_channel = 0xFF;
	} else {
		/* Enable both channel to do transfer */
		config_and_enable_dma_channel(
			spi_device, tx_dma, tx_handshake, tx_dma_chan, width, burst,
			(uint32_t)data_out, lld_spi_dr_address(context->base), len,
			true);
		config_and_enable_dma_channel(spi_device, rx_dma, rx_handshake,
					      rx_dma_chan, width, burst,
					      lld_spi_dr_address(context->base),
					      (uint32_t)data_in, len, false);
#ifdef SPI_DW_2_0
		dw_spi_set_start_condition(context, len);
#endif
	}

	/* Only error interrupts; progress is tracked by the DMA callback */
	lld_spi_config_interrupt(context->base, REG_INT_ERROR);

	lld_spi_dma_enable(context->base, true);

	lld_spi_enable(context->base, true);

	/* Quad/enhanced mode: push opcode and address before the payload */
	if ((context->is_quad) && (context->quad_config)) {
		const sedi_spi_enhanced_config_t *config = context->quad_config;

		/* Instruction need 1 entry */
		spi->dr = *(config->inst_buf);
		spi->dr = *((uint32_t *)(config->addr_buf));
		/* After fill in addr and instruction, no need to keep quad state,
		   just transfer data as standard SPI */
		context->is_quad = false;
		context->quad_config = NULL;
	}

	if (context->transfer_mode == SPI_TRANSFER_MODE_SEND) {
		sedi_dma_start_transfer(tx_dma, tx_dma_chan, (uint32_t)data_out,
					lld_spi_dr_address(context->base), len);
	} else if (context->transfer_mode == SPI_TRANSFER_MODE_RECEIVE) {
		sedi_dma_start_transfer(rx_dma, rx_dma_chan, lld_spi_dr_address(context->base),
					(uint32_t)data_in, len);
	} else {
		/* the SPI transaction may interrupted by some other events between Tx/Rx dma
		 * enable, which probably lead to rx fifo overflow, start rx channel firstly.
		 */
		sedi_dma_start_transfer(rx_dma, rx_dma_chan, lld_spi_dr_address(context->base),
					(uint32_t)data_in, len);
		sedi_dma_start_transfer(tx_dma, tx_dma_chan, (uint32_t)data_out,
					lld_spi_dr_address(context->base), len);
	}

#endif
	return SEDI_DRIVER_OK;
}
989
/*
 * Blocking (polling) transfer of @num bytes.
 *
 * @param spi_device  controller index, must be < SEDI_SPI_NUM
 * @param data_out    tx buffer, may be NULL for receive-only
 * @param data_in     rx buffer, may be NULL for send-only
 * @param num         byte count; must be a multiple of the frame size
 *
 * The hardware transfer mode (send / receive / both) is derived from which
 * buffers are non-NULL by lld_spi_set_transfer_mode().  The function spins
 * until both FIFO directions are drained and the controller goes idle.
 *
 * @return SEDI_DRIVER_OK, SEDI_DRIVER_ERROR_BUSY if a transfer is already in
 *         flight, or SEDI_DRIVER_ERROR_PARAMETER on invalid arguments.
 */
int32_t sedi_spi_poll_transfer(IN sedi_spi_t spi_device, IN uint8_t *data_out,
			       OUT uint8_t *data_in, IN uint32_t num)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	uint32_t tx_num = num, rx_num = num, fill_num = 0, receive_num = 0;
	struct spi_context *context = &spi_contexts[spi_device];

	DBG_CHECK(((num % context->frame_size) == 0),
		  SEDI_DRIVER_ERROR_PARAMETER);

	if (context->status.busy) {
		return SEDI_DRIVER_ERROR_BUSY;
	}

	context->status.busy = 1U;
	context->data_tx = (void *)data_out;
	context->data_rx = (void *)data_in;
	context->tx_data_len = num;
	context->rx_data_len = num;
	context->data_tx_idx = 0;
	context->data_rx_idx = 0;
#ifdef SPI_DW_2_0
	/* Clear the TX FIFO start-threshold bit field */
	context->base->txftlr &= ~SPI_TXFTLR_TXFTHR_MASK;
#endif

	/* Decide the transfer mode, send, receive or both */
	lld_spi_set_transfer_mode(spi_device, data_out, data_in);

	/* LSB-first is emulated in software: reverse tx bits before sending */
	if ((context->transfer_mode != SPI_TRANSFER_MODE_RECEIVE) &&
	    (context->is_lsb == true)) {
		spi_bit_reverse(context->data_tx, context->tx_data_len,
				context->frame_size);
	}

	/* According to different transfer mode, do different fill or receive */
	if (context->transfer_mode == SPI_TRANSFER_MODE_SEND) {
		rx_num = 0;
	} else if (context->transfer_mode == SPI_TRANSFER_MODE_RECEIVE) {
		tx_num = context->frame_size; /* Shall send at least one data
						 for receive */
		DBG_CHECK((num <= SPI_RECEIVE_MODE_MAX_SIZE),
			  SEDI_DRIVER_ERROR_PARAMETER);
		/* CTRLR1 holds NDF: number of frames to clock in, minus one */
		context->base->ctrl1 = num / context->frame_size - 1;
	}

	lld_spi_enable(context->base, true);

	/* Prime the TX FIFO before entering the poll loop (in receive-only
	 * mode this single write starts the clocking)
	 */
	fill_num = lld_spi_fill_fifo(spi_device, context->frame_size, data_out,
				     tx_num);
	/* Update buffer and number */
	if (data_out) {
		data_out += fill_num;
	}
	tx_num -= fill_num;

	/* Interleave fill and drain so the RX FIFO can never overflow */
	while ((tx_num > 0) || (rx_num > 0)) {
		if (tx_num > 0) {
			/* First send some data */
			fill_num = lld_spi_fill_fifo(
			    spi_device, context->frame_size, data_out, tx_num);
			/* Update buffer and number */
			data_out += fill_num;
			tx_num -= fill_num;
		}

		if (rx_num > 0) {
			/* Receive some data */
			receive_num = lld_spi_receive_fifo(context->base,
							   context->frame_size,
							   data_in, rx_num);
			data_in += receive_num;
			rx_num -= receive_num;
		}
	}

	/* Waiting for SPI idle (last frame shifted out on the wire) */
	while (lld_spi_is_busy(context->base))
		;
	lld_spi_enable(context->base, false);

	context->status.busy = 0U;
	context->data_tx_idx = num;
	context->data_rx_idx = num;

	/* If has rx buffer and need bit reverse */
	if ((context->transfer_mode != SPI_TRANSFER_MODE_SEND) &&
	    (context->is_lsb == true)) {
		spi_bit_reverse(context->data_rx, context->rx_data_len,
				context->frame_size);
	}

	/* Undo the earlier tx bit-reversal so the caller's buffer is
	 * restored to its original contents
	 */
	if ((context->transfer_mode != SPI_TRANSFER_MODE_RECEIVE) &&
	    (context->is_lsb == true)) {
		spi_bit_reverse(context->data_tx, context->tx_data_len,
				context->frame_size);
	}
	return SEDI_DRIVER_OK;
}
1093
/*
 * Start an interrupt-driven transfer of @num bytes.
 *
 * @param spi_device  controller index, must be < SEDI_SPI_NUM
 * @param data_out    tx buffer, may be NULL for receive-only
 * @param data_in     rx buffer, may be NULL for send-only
 * @param num         byte count; must be a multiple of the frame size
 *
 * Returns immediately after enabling TX/RX/error interrupts; completion is
 * reported through the registered callback from spi_isr().
 *
 * @return SEDI_DRIVER_OK, SEDI_DRIVER_ERROR_BUSY if a transfer is already in
 *         flight, or SEDI_DRIVER_ERROR_PARAMETER on invalid arguments.
 */
int32_t sedi_spi_transfer(IN sedi_spi_t spi_device, IN uint8_t *data_out,
			  OUT uint8_t *data_in, IN uint32_t num)
{
	DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);

	struct spi_context *context = &spi_contexts[spi_device];
	spi_reg_t *spi = context->base;
	uint32_t send_count = num;

	DBG_CHECK(((num % context->frame_size) == 0),
		  SEDI_DRIVER_ERROR_PARAMETER);

	if (context->status.busy) {
		return SEDI_DRIVER_ERROR_BUSY;
	}

	/* For transfer size less than watermark */
	if (num < context->rx_watermark * context->frame_size) {
		/* Only shall reset the receive watermark to finish trigger
		 * interrupt */
		lld_spi_set_rx_watermark(context->base,
					 num / context->frame_size);
	} else {
		/* NOTE(review): the branch above programs the watermark in
		 * frames (num / frame_size) while this one multiplies by
		 * frame_size — the units look inconsistent; confirm against
		 * the RXFTLR register definition.
		 */
		lld_spi_set_rx_watermark(context->base,
					 (SPI_FIFO_DEPTH / 2 - 1) *
					     context->frame_size);
	}

	lld_spi_set_transfer_mode(spi_device, data_out, data_in);

	/* For IRQ mode only, if use multiple buffers, cannot change mode in
	 * transfer */
	if ((context->is_cs_continuous == true) && (!context->is_quad)) {
		spi->ctrl0 &= ~SPI_CTRL0_TMOD_MASK;
		spi->ctrl0 |= SPI_CTRL0_BOTH_MODE;
		context->transfer_mode = SPI_TRANSFER_MODE_BOTH;
	}

	context->status.busy = 1U;

	context->data_tx = (void *)data_out;
	context->data_rx = (void *)data_in;
	context->tx_data_len = num;
	context->rx_data_len = num;
	context->data_tx_idx = 0;
	context->data_rx_idx = 0;
#ifdef SPI_DW_2_0
	/* Clear the TX FIFO start-threshold bit field */
	spi->txftlr &= ~SPI_TXFTLR_TXFTHR_MASK;
#endif

	/* LSB-first is emulated in software: reverse tx bits before sending */
	if ((context->transfer_mode != SPI_TRANSFER_MODE_RECEIVE) &&
	    (context->is_lsb == true)) {
		spi_bit_reverse(context->data_tx, context->tx_data_len,
				context->frame_size);
	}

	/* According to different transfer mode, do different fill or receive */
	if (context->transfer_mode == SPI_TRANSFER_MODE_SEND) {
		/* Nothing to receive: mark rx side as already complete */
		context->data_rx_idx = num;
	} else if (context->transfer_mode == SPI_TRANSFER_MODE_RECEIVE) {
		send_count = context->frame_size;
		DBG_CHECK((num <= SPI_RECEIVE_MODE_MAX_SIZE),
			  SEDI_DRIVER_ERROR_PARAMETER);
		/* CTRLR1 holds NDF: number of frames to clock in, minus one */
		context->base->ctrl1 = num / context->frame_size - 1;
		/* Write into FIFO needs to enable SPI first */
		lld_spi_enable(context->base, true);
		lld_spi_fill_fifo(spi_device, context->frame_size, data_out,
				  send_count);
		context->data_tx_idx = num;
	}

#ifdef SPI_DW_2_0
	if (context->transfer_mode != SPI_TRANSFER_MODE_RECEIVE) {
		dw_spi_set_start_condition(context, num);
	}
#endif

	lld_spi_enable(context->base, true);

	lld_spi_config_interrupt(context->base,
				 REG_INT_TX | REG_INT_RX | REG_INT_ERROR);

	return SEDI_DRIVER_OK;
}
1180
spi_enhanced_config(IN sedi_spi_t spi_device,IN sedi_spi_enhanced_config_t * config)1181 static int32_t spi_enhanced_config(IN sedi_spi_t spi_device,
1182 IN sedi_spi_enhanced_config_t *config)
1183 {
1184 struct spi_context *context = &spi_contexts[spi_device];
1185 spi_reg_t *spi = context->base;
1186 uint32_t val;
1187
1188 if ((spi->ctrl0 & SPI_FRAME_FORMAT_MASK) == 0) {
1189 /* single mode no need to configure */
1190 return 0;
1191 }
1192
1193 /* inst is must, address is option */
1194 if ((config->inst_buf == NULL) || (config->inst_len == 0)) {
1195 return SEDI_DRIVER_ERROR_PARAMETER;
1196 }
1197
1198 context->is_quad = true;
1199 context->quad_config = config;
1200
1201 /* Disable spi first to set registers */
1202 lld_spi_enable(spi, false);
1203
1204 /* Config SPI_CTRL0 register */
1205 spi->spi_ctrl0 = 0;
1206 val = ((config->inst_len << SPI_CTRLR0_INST_L_SHIFT) |
1207 (config->addr_len << SPI_CTRLR0_ADDR_L_SHIFT) |
1208 (config->dummy_cycles << SPI_CTRLR0_WAIT_CYCLE_SHIFT) |
1209 config->mode);
1210 spi->spi_ctrl0 = val;
1211
1212 return 0;
1213 }
1214
sedi_spi_enhanced_transfer(IN sedi_spi_t spi_device,IN sedi_spi_enhanced_config_t * config)1215 int32_t sedi_spi_enhanced_transfer(IN sedi_spi_t spi_device,
1216 IN sedi_spi_enhanced_config_t *config)
1217 {
1218 DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
1219
1220 /* Handle enhanced spi config */
1221 spi_enhanced_config(spi_device, config);
1222
1223 /* start transfer */
1224 return sedi_spi_transfer(spi_device, config->tx_buf, config->rx_buf,
1225 config->data_len);
1226 }
1227
sedi_spi_dma_enhanced_transfer(IN sedi_spi_t spi_device,IN uint32_t tx_dma,IN uint32_t tx_dma_chan,IN uint32_t rx_dma,IN uint32_t rx_dma_chan,IN sedi_spi_enhanced_config_t * config)1228 int32_t sedi_spi_dma_enhanced_transfer(IN sedi_spi_t spi_device, IN uint32_t tx_dma,
1229 IN uint32_t tx_dma_chan, IN uint32_t rx_dma,
1230 IN uint32_t rx_dma_chan,
1231 IN sedi_spi_enhanced_config_t *config)
1232 {
1233 DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
1234
1235 /* Handle enhanced spi config */
1236 spi_enhanced_config(spi_device, config);
1237
1238 /* start transfer */
1239 return sedi_spi_dma_transfer(spi_device, tx_dma, tx_dma_chan,
1240 config->tx_buf, rx_dma, rx_dma_chan,
1241 config->rx_buf, config->data_len);
1242 }
1243
sedi_spi_update_tx_buf(IN sedi_spi_t spi_device,IN uint8_t * tx_buf,IN uint32_t len)1244 int32_t sedi_spi_update_tx_buf(IN sedi_spi_t spi_device, IN uint8_t *tx_buf,
1245 IN uint32_t len)
1246 {
1247 DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
1248
1249 struct spi_context *context = &spi_contexts[spi_device];
1250
1251 DBG_CHECK(((len % context->frame_size) == 0),
1252 SEDI_DRIVER_ERROR_PARAMETER);
1253
1254 /* This function can only used in continuous mode */
1255 DBG_CHECK((context->is_cs_continuous == true),
1256 SEDI_DRIVER_ERROR_UNSUPPORTED);
1257
1258 if (len == 0) {
1259 return SEDI_DRIVER_ERROR_PARAMETER;
1260 }
1261 /* As continuous mode all use both transfer mode, rx also need to update
1262 * length */
1263 context->data_tx = (void *)tx_buf;
1264 context->tx_data_len += len;
1265
1266 return SEDI_DRIVER_OK;
1267 }
1268
sedi_spi_update_rx_buf(IN sedi_spi_t spi_device,OUT uint8_t * rx_buf,IN uint32_t len)1269 int32_t sedi_spi_update_rx_buf(IN sedi_spi_t spi_device, OUT uint8_t *rx_buf,
1270 IN uint32_t len)
1271 {
1272 DBG_CHECK(spi_device < SEDI_SPI_NUM, SEDI_DRIVER_ERROR_PARAMETER);
1273
1274 struct spi_context *context = &spi_contexts[spi_device];
1275
1276 DBG_CHECK(((len % context->frame_size) == 0),
1277 SEDI_DRIVER_ERROR_PARAMETER);
1278
1279 /* This function can only used in continuous mode */
1280 DBG_CHECK((context->is_cs_continuous == true),
1281 SEDI_DRIVER_ERROR_UNSUPPORTED);
1282
1283 if (len == 0) {
1284 return SEDI_DRIVER_ERROR_PARAMETER;
1285 }
1286
1287 /* As continuous mode all use both transfer mode, rx also need to update
1288 * length */
1289 context->data_rx = (void *)rx_buf;
1290 context->rx_data_len += len;
1291
1292 return SEDI_DRIVER_OK;
1293 }
1294
/*
 * SPI interrupt service routine: drains/fills the FIFOs for an IRQ-mode
 * transfer, adjusts the rx watermark for the final partial burst, and fires
 * the user callback on TX/RX completion, overall completion, or error.
 */
void spi_isr(IN sedi_spi_t spi_device)
{
	struct spi_context *context = &spi_contexts[spi_device];
	uint32_t intr_stat;
	int error = false;
	int end = false;
	int event;
	int idx;
	uint32_t temp, rx_len;

	/* Read-and-clear the interrupt status in one step */
	intr_stat = lld_spi_interrupt_clear(context->base);

	/* To reduce the interrupt times, send/receive as many as possible */
	if (intr_stat & (REG_INT_RX | REG_INT_TX)) {
		while (context->data_tx_idx < context->tx_data_len) {
			temp = context->tx_data_len - context->data_tx_idx;
			idx = lld_spi_fill_fifo(spi_device, context->frame_size,
						context->data_tx, temp);

			context->data_tx_idx += idx;
			/* data_tx is NULL in receive-only mode */
			if (context->data_tx != NULL) {
				context->data_tx += idx;
			}

			if ((context->data_tx_idx == context->tx_data_len) &&
			    (context->cb_event)) {
				context->cb_event(SEDI_SPI_EVENT_TX_FINISHED,
						  context->cb_param);
			}

			if (idx < temp) {
				/* If last transfer filled FIFO full, break */
				break;
			}
		}

		while (context->data_rx_idx < context->rx_data_len) {
			rx_len = context->rx_data_len - context->data_rx_idx;
			idx = lld_spi_receive_fifo(context->base,
						   context->frame_size,
						   context->data_rx, rx_len);

			context->data_rx_idx += idx;
			/* data_rx is NULL in send-only mode */
			if (context->data_rx != NULL) {
				context->data_rx += idx;
			}

			/*Check if need to modify watermark for last transfer*/
			if ((context->rx_data_len - context->data_rx_idx <
			     context->frame_size * context->rx_watermark) &&
			    (context->rx_data_len != context->data_rx_idx)) {
				temp = (context->rx_data_len -
					context->data_rx_idx) /
				       context->frame_size;
				lld_spi_set_rx_watermark(context->base, temp);
				context->rx_watermark = temp;
			}

			if ((context->data_rx_idx == context->rx_data_len) &&
			    (context->cb_event)) {
				context->cb_event(SEDI_SPI_EVENT_RX_FINISHED,
						  context->cb_param);
			}

			if (idx < rx_len) {
				/* If last transfer received all data in FIFO,
				 * break */
				break;
			}
		}
	}

	/* NOTE(review): the rx index is compared against the TX length and
	 * the tx index against the RX length.  Harmless while both lengths
	 * are equal (the common case), but looks swapped — confirm intent
	 * for continuous mode, where update_tx_buf/update_rx_buf can let the
	 * two lengths diverge.
	 */
	if ((context->data_rx_idx == context->tx_data_len) &&
	    (context->data_tx_idx == context->rx_data_len)) {
		end = true;
		event = SEDI_SPI_EVENT_COMPLETE;
		/* Wait for Data in FIFO send out while not continuous */
		while (lld_spi_is_busy(context->base))
			;

		/* If need to reverse rx buffer */
		if ((context->transfer_mode != SPI_TRANSFER_MODE_SEND) &&
		    (context->is_lsb == true)) {
			/* Rewind to the buffer start before bit-reversing */
			context->data_rx -= context->data_rx_idx;
			spi_bit_reverse(context->data_rx, context->rx_data_len,
					context->frame_size);
		}
		/* If need to recover tx buffer */
		if ((context->transfer_mode != SPI_TRANSFER_MODE_RECEIVE) &&
		    (context->is_lsb == true)) {
			context->data_tx -= context->data_tx_idx;
			spi_bit_reverse(context->data_tx, context->tx_data_len,
					context->frame_size);
		}
	}

	if (intr_stat & REG_INT_ERROR) {
		error = true;
		event = SEDI_SPI_EVENT_DATA_LOST;
		context->status.data_lost = true;
	}

	/* On completion or error: quiesce the controller, then notify */
	if ((error || end) && (context->status.busy != 0)) {
		context->status.busy = 0;
		lld_spi_config_interrupt(context->base, REG_INT_NONE);
		lld_spi_enable(context->base, false);

		if (context->cb_event) {
			context->cb_event(event, context->cb_param);
		}
	}
}
1407
/* Interrupt entry thunk for SPI controller 0 */
SEDI_ISR_DECLARE(sedi_spi_0_isr)
{
	spi_isr(SEDI_SPI_0);
}
1412
#ifdef SEDI_SPI_1_REG_BASE
/* Interrupt entry thunk for SPI controller 1 (only when the second
 * controller instance exists on this SoC)
 */
SEDI_ISR_DECLARE(sedi_spi_1_isr)
{
	spi_isr(SEDI_SPI_1);
}
#endif
1419