/*
 * Copyright (c) 2021 Telink Semiconductor
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT telink_b91_spi

/*
 * Pull in the Telink HAL SPI implementation. Its 'spi_read' and 'spi_write'
 * functions are renamed here so they do not collide with the functions of the
 * same name declared by Zephyr's SPI API.
 */
#define spi_read hal_spi_read
#define spi_write hal_spi_write
#include "spi.c"
#undef spi_read
#undef spi_write

#include "clock.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_telink);

#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#include "spi_context.h"
#include <zephyr/drivers/pinctrl.h>


#define CHIP_SELECT_COUNT        3u
#define SPI_WORD_SIZE            8u
#define SPI_WR_RD_CHUNK_SIZE_MAX 16u
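
/*
 * Note: transfers in spi_b91_txrx() are split into chunks of at most
 * SPI_WR_RD_CHUNK_SIZE_MAX bytes, and each byte is staged through the
 * peripheral's 4-byte data word register (reg_spi_wr_rd_data).
 */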


/* SPI configuration structure */
struct spi_b91_cfg {
	uint8_t peripheral_id;
	gpio_pin_e cs_pin[CHIP_SELECT_COUNT];
	const struct pinctrl_dev_config *pcfg;
};
#define SPI_CFG(dev) ((struct spi_b91_cfg *) ((dev)->config))

/* SPI data structure */
struct spi_b91_data {
	struct spi_context ctx;
};
#define SPI_DATA(dev) ((struct spi_b91_data *) ((dev)->data))


/* disable hardware cs flow control */
static void spi_b91_hw_cs_disable(const struct spi_b91_cfg *config)
{
	gpio_pin_e pin;

	/* loop through all cs pins (cs0..cs2) */
	for (int i = 0; i < CHIP_SELECT_COUNT; i++) {
		/* get CS pin defined in device tree */
		pin = config->cs_pin[i];

		/* if CS pin is defined in device tree */
		if (pin != 0) {
			if (config->peripheral_id == PSPI_MODULE) {
				/* disable CS pin for PSPI */
				pspi_cs_pin_dis(pin);
			} else {
				/* disable CS pin for HSPI */
				hspi_cs_pin_dis(pin);
			}
		}
	}
}

/* config cs flow control: hardware or software */
static bool spi_b91_config_cs(const struct device *dev,
			      const struct spi_config *config)
{
	pspi_csn_pin_def_e cs_pin = 0;
	const struct spi_b91_cfg *b91_config = SPI_CFG(dev);

	/* software flow control */
	if (spi_cs_is_gpio(config)) {
		/* disable all hardware CS pins */
		spi_b91_hw_cs_disable(b91_config);
		return true;
	}

	/* hardware flow control */

	/* check for correct slave id */
	if (config->slave >= CHIP_SELECT_COUNT) {
		LOG_ERR("Slave %d not supported (max. %d)", config->slave, CHIP_SELECT_COUNT - 1);
		return false;
	}

	/* loop through all cs pins: cs0, cs1 and cs2 */
	for (int cs_id = 0; cs_id < CHIP_SELECT_COUNT; cs_id++) {
		/* get cs pin defined in device tree */
		cs_pin = b91_config->cs_pin[cs_id];

		/* if cs pin is not defined for the selected slave, return error */
		if ((cs_pin == 0) && (cs_id == config->slave)) {
			LOG_ERR("cs%d-pin is not defined in device tree", config->slave);
			return false;
		}

		/* disable cs pin if it is defined and is not requested */
		if ((cs_pin != 0) && (cs_id != config->slave)) {
			if (b91_config->peripheral_id == PSPI_MODULE) {
				pspi_cs_pin_dis(cs_pin);
			} else {
				hspi_cs_pin_dis(cs_pin);
			}
		}

		/* enable cs pin if it is defined and is requested */
		if ((cs_pin != 0) && (cs_id == config->slave)) {
			if (b91_config->peripheral_id == PSPI_MODULE) {
				pspi_set_pin_mux(cs_pin);
				pspi_cs_pin_en(cs_pin);
			} else {
				hspi_set_pin_mux(cs_pin);
				hspi_cs_pin_en(cs_pin);
			}
		}
	}

	return true;
}

/* get spi transaction length */
static uint32_t spi_b91_get_txrx_len(const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs)
{
	uint32_t len_tx = 0;
	uint32_t len_rx = 0;

	/* calculate tx len (tx_bufs may be NULL, e.g. for spi_read()) */
	if (tx_bufs != NULL) {
		const struct spi_buf *tx_buf = tx_bufs->buffers;

		for (size_t i = 0; i < tx_bufs->count; i++) {
			len_tx += tx_buf->len;
			tx_buf++;
		}
	}

	/* calculate rx len (rx_bufs may be NULL, e.g. for spi_write()) */
	if (rx_bufs != NULL) {
		const struct spi_buf *rx_buf = rx_bufs->buffers;

		for (size_t i = 0; i < rx_bufs->count; i++) {
			len_rx += rx_buf->len;
			rx_buf++;
		}
	}

	/* the transaction length is the longer of the total tx and rx lengths */
	return MAX(len_tx, len_rx);
}

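/*
 * The TX/RX helpers and the transceive loop below carry the HAL's
 * _attribute_ram_code_sec_ attribute, which places them in the RAM code
 * section (presumably so the FIFO polling loops are not slowed down by
 * flash access).
 */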
/* process tx data */
_attribute_ram_code_sec_
static void spi_b91_tx(uint8_t peripheral_id, struct spi_context *ctx, uint8_t len)
{
	uint8_t tx;

	for (int i = 0; i < len; i++) {
		if (spi_context_tx_buf_on(ctx)) {
			tx = *(uint8_t *)(ctx->tx_buf);
		} else {
			tx = 0;
		}
		spi_context_update_tx(ctx, 1, 1);
		while (reg_spi_fifo_state(peripheral_id) & FLD_SPI_TXF_FULL) {
		};
		reg_spi_wr_rd_data(peripheral_id, i % 4) = tx;
	}
}

/* process rx data */
_attribute_ram_code_sec_
static void spi_b91_rx(uint8_t peripheral_id, struct spi_context *ctx, uint8_t len)
{
	uint8_t rx = 0;

	for (int i = 0; i < len; i++) {
		while (reg_spi_fifo_state(peripheral_id) & FLD_SPI_RXF_EMPTY) {
		};
		rx = reg_spi_wr_rd_data(peripheral_id, i % 4);

		if (spi_context_rx_buf_on(ctx)) {
			*ctx->rx_buf = rx;
		}
		spi_context_update_rx(ctx, 1, 1);
	}
}

/* SPI transceive internal */
_attribute_ram_code_sec_
static void spi_b91_txrx(const struct device *dev, uint32_t len)
{
	unsigned int chunk_size = SPI_WR_RD_CHUNK_SIZE_MAX;
	struct spi_b91_cfg *cfg = SPI_CFG(dev);
	struct spi_context *ctx = &SPI_DATA(dev)->ctx;

	/* prepare SPI module */
	spi_set_transmode(cfg->peripheral_id, SPI_MODE_WRITE_AND_READ);
	spi_set_cmd(cfg->peripheral_id, 0);
	spi_tx_cnt(cfg->peripheral_id, len);
	spi_rx_cnt(cfg->peripheral_id, len);

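	/*
	 * For transfers longer than one chunk, reads lag writes by one byte:
	 * the first chunk reads chunk_size - 1 bytes, middle chunks read a
	 * full chunk, and the last chunk reads one extra byte. The totals
	 * still match (reads == writes == len), while the TX FIFO stays one
	 * byte ahead of the RX drain.
	 */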
	/* write and read bytes in chunks */
	for (int i = 0; i < len; i = i + chunk_size) {
		/* check for tail */
		if (chunk_size > (len - i)) {
			chunk_size = len - i;
		}

		/* write bytes */
		spi_b91_tx(cfg->peripheral_id, ctx, chunk_size);

		/* read bytes */
		if (len <= SPI_WR_RD_CHUNK_SIZE_MAX) {
			/* single chunk: read all bytes */
			spi_b91_rx(cfg->peripheral_id, ctx, chunk_size);
		} else if (i == 0) {
			/* head: read one byte fewer than was sent */
			spi_b91_rx(cfg->peripheral_id, ctx, chunk_size - 1);
		} else if ((len - i) > SPI_WR_RD_CHUNK_SIZE_MAX) {
			/* body: read as many bytes as were sent */
			spi_b91_rx(cfg->peripheral_id, ctx, chunk_size);
		} else {
			/* tail: read the remaining bytes */
			spi_b91_rx(cfg->peripheral_id, ctx, chunk_size + 1);
		}

		/* clear TX and RX fifo */
		BM_SET(reg_spi_fifo_state(cfg->peripheral_id), FLD_SPI_TXF_CLR);
		BM_SET(reg_spi_fifo_state(cfg->peripheral_id), FLD_SPI_RXF_CLR);
	}

	/* wait until the SPI peripheral is no longer busy */
	while (spi_is_busy(cfg->peripheral_id)) {
	};

	/* context complete */
	spi_context_complete(ctx, dev, 0);
}

/* Check for supported configuration */
static bool spi_b91_is_config_supported(const struct spi_config *config,
					struct spi_b91_cfg *b91_config)
{
	/* check for half-duplex */
	if (config->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return false;
	}

	/* check for loop back */
	if (config->operation & SPI_MODE_LOOP) {
		LOG_ERR("Loop back mode not supported");
		return false;
	}

	/* check for transfer LSB first */
	if (config->operation & SPI_TRANSFER_LSB) {
		LOG_ERR("LSB first not supported");
		return false;
	}

	/* check word size */
	if (SPI_WORD_SIZE_GET(config->operation) != SPI_WORD_SIZE) {
		LOG_ERR("Word size must be %d", SPI_WORD_SIZE);
		return false;
	}

	/* check for CS active high */
	if (config->operation & SPI_CS_ACTIVE_HIGH) {
		LOG_ERR("CS active high not supported for HW flow control");
		return false;
	}

	/* check for lines configuration */
	if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES)) {
		if ((config->operation & SPI_LINES_MASK) == SPI_LINES_OCTAL) {
			LOG_ERR("SPI lines Octal is not supported");
			return false;
		} else if (((config->operation & SPI_LINES_MASK) ==
			    SPI_LINES_QUAD) &&
			   (b91_config->peripheral_id == PSPI_MODULE)) {
			LOG_ERR("SPI lines Quad is not supported by PSPI");
			return false;
		}
	}

	/* check for slave configuration */
	if (SPI_OP_MODE_GET(config->operation) == SPI_OP_MODE_SLAVE) {
		LOG_ERR("SPI Slave is not implemented");
		/* this function returns bool, so report "not supported" */
		return false;
	}

	return true;
}

/* SPI configuration */
static int spi_b91_config(const struct device *dev,
			  const struct spi_config *config)
{
	int status = 0;
	spi_mode_type_e mode = SPI_MODE0;
	struct spi_b91_cfg *b91_config = SPI_CFG(dev);
	struct spi_b91_data *b91_data = SPI_DATA(dev);
	uint8_t clk_src = b91_config->peripheral_id == PSPI_MODULE ? sys_clk.pclk : sys_clk.hclk;

	/* check for unsupported configuration */
	if (!spi_b91_is_config_supported(config, b91_config)) {
		return -ENOTSUP;
	}

	/* config slave selection (CS): hw or sw */
	if (!spi_b91_config_cs(dev, config)) {
		return -ENOTSUP;
	}

	/* get SPI mode */
	if (((config->operation & SPI_MODE_CPHA) == 0) &&
	    ((config->operation & SPI_MODE_CPOL) == 0)) {
		mode = SPI_MODE0;
	} else if (((config->operation & SPI_MODE_CPHA) == 0) &&
		   ((config->operation & SPI_MODE_CPOL) == SPI_MODE_CPOL)) {
		mode = SPI_MODE1;
	} else if (((config->operation & SPI_MODE_CPHA) == SPI_MODE_CPHA) &&
		   ((config->operation & SPI_MODE_CPOL) == 0)) {
		mode = SPI_MODE2;
	} else if (((config->operation & SPI_MODE_CPHA) == SPI_MODE_CPHA) &&
		   ((config->operation & SPI_MODE_CPOL) == SPI_MODE_CPOL)) {
		mode = SPI_MODE3;
	}

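	/*
	 * The divider passed to spi_master_init() is computed as
	 * clk_src (MHz) * 1000000 / (2 * frequency) - 1, i.e. it assumes the
	 * HAL clocks the bus at clk_src / (2 * (div + 1)). The source clock is
	 * pclk for PSPI and hclk for HSPI.
	 */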
	/* init SPI master */
	spi_master_init(b91_config->peripheral_id,
			clk_src * 1000000 / (2 * config->frequency) - 1, mode);
	spi_master_config(b91_config->peripheral_id, SPI_NOMAL);

	/* set lines configuration */
	if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES)) {
		uint32_t lines = config->operation & SPI_LINES_MASK;

		if (lines == SPI_LINES_SINGLE) {
			spi_set_io_mode(b91_config->peripheral_id,
					SPI_SINGLE_MODE);
		} else if (lines == SPI_LINES_DUAL) {
			spi_set_io_mode(b91_config->peripheral_id,
					SPI_DUAL_MODE);
		} else if (lines == SPI_LINES_QUAD) {
			spi_set_io_mode(b91_config->peripheral_id,
					HSPI_QUAD_MODE);
		}
	}

	/* configure pins */
	status = pinctrl_apply_state(b91_config->pcfg, PINCTRL_STATE_DEFAULT);
	if (status < 0) {
		LOG_ERR("Failed to configure SPI pins");
		return status;
	}

	/* save context config */
	b91_data->ctx.config = config;

	return 0;
}

/* API implementation: init */
static int spi_b91_init(const struct device *dev)
{
	int err;
	struct spi_b91_data *data = SPI_DATA(dev);

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

/* API implementation: transceive */
static int spi_b91_transceive(const struct device *dev,
			      const struct spi_config *config,
			      const struct spi_buf_set *tx_bufs,
			      const struct spi_buf_set *rx_bufs)
{
	int status = 0;
	struct spi_b91_data *data = SPI_DATA(dev);
	uint32_t txrx_len = spi_b91_get_txrx_len(tx_bufs, rx_bufs);

	/* set configuration */
	status = spi_b91_config(dev, config);
	if (status) {
		return status;
	}

	/* context setup */
	spi_context_lock(&data->ctx, false, NULL, NULL, config);
	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	/* if cs is defined: software cs control, set active true */
	if (spi_cs_is_gpio(config)) {
		spi_context_cs_control(&data->ctx, true);
	}

	/* transceive data */
	spi_b91_txrx(dev, txrx_len);

	/* if cs is defined: software cs control, set active false */
	if (spi_cs_is_gpio(config)) {
		spi_context_cs_control(&data->ctx, false);
	}

	/* release context */
	status = spi_context_wait_for_completion(&data->ctx);
	spi_context_release(&data->ctx, status);

	return status;
}

#ifdef CONFIG_SPI_ASYNC
/* API implementation: transceive_async */
static int spi_b91_transceive_async(const struct device *dev,
				    const struct spi_config *config,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs,
				    spi_callback_t cb,
				    void *userdata)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(config);
	ARG_UNUSED(tx_bufs);
	ARG_UNUSED(rx_bufs);
	ARG_UNUSED(cb);
	ARG_UNUSED(userdata);

	return -ENOTSUP;
}
#endif /* CONFIG_SPI_ASYNC */

/* API implementation: release */
static int spi_b91_release(const struct device *dev,
			   const struct spi_config *config)
{
	struct spi_b91_data *data = SPI_DATA(dev);

	if (!spi_context_configured(&data->ctx, config)) {
		return -EINVAL;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

/* SPI driver APIs structure */
static DEVICE_API(spi, spi_b91_api) = {
	.transceive = spi_b91_transceive,
	.release = spi_b91_release,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_b91_transceive_async,
#endif /* CONFIG_SPI_ASYNC */
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
};

/* SPI driver registration */
#define SPI_B91_INIT(inst)                                                \
                                                                          \
	PINCTRL_DT_INST_DEFINE(inst);                                     \
                                                                          \
	static struct spi_b91_data spi_b91_data_##inst = {               \
		SPI_CONTEXT_INIT_LOCK(spi_b91_data_##inst, ctx),          \
		SPI_CONTEXT_INIT_SYNC(spi_b91_data_##inst, ctx),          \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(inst), ctx)   \
	};                                                                \
                                                                          \
	static struct spi_b91_cfg spi_b91_cfg_##inst = {                  \
		.peripheral_id = DT_INST_ENUM_IDX(inst, peripheral_id),   \
		.cs_pin[0] = DT_INST_STRING_TOKEN(inst, cs0_pin),         \
		.cs_pin[1] = DT_INST_STRING_TOKEN(inst, cs1_pin),         \
		.cs_pin[2] = DT_INST_STRING_TOKEN(inst, cs2_pin),         \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst),             \
	};                                                                \
                                                                          \
	SPI_DEVICE_DT_INST_DEFINE(inst, spi_b91_init,                     \
				  NULL,                                   \
				  &spi_b91_data_##inst,                   \
				  &spi_b91_cfg_##inst,                    \
				  POST_KERNEL,                            \
				  CONFIG_SPI_INIT_PRIORITY,               \
				  &spi_b91_api);

DT_INST_FOREACH_STATUS_OKAY(SPI_B91_INIT)
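/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * talks to this controller through the generic Zephyr SPI API. The node
 * label (spi_dev0) and the transfer below are hypothetical.
 *
 *	static const struct spi_dt_spec spec = SPI_DT_SPEC_GET(
 *		DT_NODELABEL(spi_dev0),
 *		SPI_OP_MODE_MASTER | SPI_WORD_SET(8) | SPI_TRANSFER_MSB, 0);
 *
 *	uint8_t tx_data[2] = {0x9F, 0x00};
 *	const struct spi_buf tx_buf = {.buf = tx_data, .len = sizeof(tx_data)};
 *	const struct spi_buf_set tx = {.buffers = &tx_buf, .count = 1};
 *
 *	int err = spi_write_dt(&spec, &tx);
 */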