1 /*
2  * Copyright (c) 2021 Telink Semiconductor
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DT_DRV_COMPAT telink_b91_spi
8 
9 /*  Redefine 'spi_read' and 'spi_write' functions names from HAL */
10 #define spi_read    hal_spi_read
11 #define spi_write   hal_spi_write
12 #include "spi.c"
13 #undef spi_read
14 #undef spi_write
15 
16 #include "clock.h"
17 
18 #include <zephyr/logging/log.h>
19 LOG_MODULE_REGISTER(spi_telink);
20 
21 #include <zephyr/drivers/spi.h>
22 #include "spi_context.h"
23 #include <zephyr/drivers/pinctrl.h>
24 
25 
26 #define CHIP_SELECT_COUNT               3u
27 #define SPI_WORD_SIZE                   8u
28 #define SPI_WR_RD_CHUNK_SIZE_MAX        16u
29 
30 
31 /* SPI configuration structure */
32 struct spi_b91_cfg {
33 	uint8_t peripheral_id;
34 	gpio_pin_e cs_pin[CHIP_SELECT_COUNT];
35 	const struct pinctrl_dev_config *pcfg;
36 };
37 #define SPI_CFG(dev)                    ((struct spi_b91_cfg *) ((dev)->config))
38 
39 /* SPI data structure */
40 struct spi_b91_data {
41 	struct spi_context ctx;
42 };
43 #define SPI_DATA(dev)                   ((struct spi_b91_data *) ((dev)->data))
44 
45 
46 /* disable hardware cs flow control */
spi_b91_hw_cs_disable(const struct spi_b91_cfg * config)47 static void spi_b91_hw_cs_disable(const struct spi_b91_cfg *config)
48 {
49 	gpio_pin_e pin;
50 
51 	/* loop through all cs pins (cs0..cs2) */
52 	for (int i = 0; i < CHIP_SELECT_COUNT; i++) {
53 		/* get CS pin defined in device tree */
54 		pin = config->cs_pin[i];
55 
56 		/* if CS pin is defined in device tree */
57 		if (pin != 0) {
58 			if (config->peripheral_id == PSPI_MODULE) {
59 				/* disable CS pin for PSPI */
60 				pspi_cs_pin_dis(pin);
61 			} else {
62 				/* disable CS pin for MSPI */
63 				hspi_cs_pin_dis(pin);
64 			}
65 		}
66 	}
67 }
68 
69 /* config cs flow control: hardware or software */
spi_b91_config_cs(const struct device * dev,const struct spi_config * config)70 static bool spi_b91_config_cs(const struct device *dev,
71 			      const struct spi_config *config)
72 {
73 	pspi_csn_pin_def_e cs_pin = 0;
74 	const struct spi_b91_cfg *b91_config = SPI_CFG(dev);
75 
76 	/* software flow control */
77 	if (spi_cs_is_gpio(config)) {
78 		/* disable all hardware CS pins */
79 		spi_b91_hw_cs_disable(b91_config);
80 		return true;
81 	}
82 
83 	/* hardware flow control */
84 
85 	/* check for correct slave id */
86 	if (config->slave >= CHIP_SELECT_COUNT) {
87 		LOG_ERR("Slave %d not supported (max. %d)", config->slave, CHIP_SELECT_COUNT - 1);
88 		return false;
89 	}
90 
91 	/* loop through all cs pins: cs0, cs1 and cs2 */
92 	for (int cs_id = 0; cs_id < CHIP_SELECT_COUNT; cs_id++) {
93 		/* get cs pin defined in device tree */
94 		cs_pin = b91_config->cs_pin[cs_id];
95 
96 		/*  if cs pin is not defined for the selected slave, return error */
97 		if ((cs_pin == 0) && (cs_id == config->slave)) {
98 			LOG_ERR("cs%d-pin is not defined in device tree", config->slave);
99 			return false;
100 		}
101 
102 		/* disable cs pin if it is defined and is not requested */
103 		if ((cs_pin != 0) && (cs_id != config->slave)) {
104 			if (b91_config->peripheral_id == PSPI_MODULE) {
105 				pspi_cs_pin_dis(cs_pin);
106 			} else {
107 				hspi_cs_pin_dis(cs_pin);
108 			}
109 		}
110 
111 		/* enable cs pin if it is defined and is requested */
112 		if ((cs_pin != 0) && (cs_id == config->slave)) {
113 			if (b91_config->peripheral_id == PSPI_MODULE) {
114 				pspi_set_pin_mux(cs_pin);
115 				pspi_cs_pin_en(cs_pin);
116 			} else {
117 				hspi_set_pin_mux(cs_pin);
118 				hspi_cs_pin_en(cs_pin);
119 			}
120 		}
121 	}
122 
123 	return true;
124 }
125 
126 /* get spi transaction length */
spi_b91_get_txrx_len(const struct spi_buf_set * tx_bufs,const struct spi_buf_set * rx_bufs)127 static uint32_t spi_b91_get_txrx_len(const struct spi_buf_set *tx_bufs,
128 				     const struct spi_buf_set *rx_bufs)
129 {
130 	uint32_t len_tx = 0;
131 	uint32_t len_rx = 0;
132 	const struct spi_buf *tx_buf = tx_bufs->buffers;
133 	const struct spi_buf *rx_buf = rx_bufs->buffers;
134 
135 	/* calculate tx len */
136 	for (int i = 0; i < tx_bufs->count; i++) {
137 		len_tx += tx_buf->len;
138 		tx_buf++;
139 	}
140 
141 	/* calculate rx len */
142 	for (int i = 0; i < rx_bufs->count; i++) {
143 		len_rx += rx_buf->len;
144 		rx_buf++;
145 	}
146 
147 	return MAX(len_tx, len_rx);
148 }
149 
150 /* process tx data */
/*
 * Push 'len' bytes into the hardware TX FIFO, consuming the context TX
 * buffers; once the TX buffers are exhausted, zero bytes are sent as
 * padding. Placed in the RAM code section (per the attribute below).
 */
_attribute_ram_code_sec_
static void spi_b91_tx(uint8_t peripheral_id, struct spi_context *ctx, uint8_t len)
{
	uint8_t tx;

	for (int i = 0; i < len; i++) {
		/* next byte from the TX buffer, or 0 as padding when empty */
		if (spi_context_tx_buf_on(ctx)) {
			tx = *(uint8_t *)(ctx->tx_buf);
		} else {
			tx = 0;
		}
		spi_context_update_tx(ctx, 1, 1);
		/* busy-wait until the TX FIFO has room for one more byte */
		while (reg_spi_fifo_state(peripheral_id) & FLD_SPI_TXF_FULL) {
		};
		/* data register is accessed byte-wise; i % 4 selects the byte lane */
		reg_spi_wr_rd_data(peripheral_id, i % 4) = tx;
	}
}
168 
169 /* process rx data */
/*
 * Pull 'len' bytes from the hardware RX FIFO into the context RX buffers;
 * bytes received after the RX buffers are full are read and discarded.
 * Placed in the RAM code section (per the attribute below).
 */
_attribute_ram_code_sec_
static void spi_b91_rx(uint8_t peripheral_id, struct spi_context *ctx, uint8_t len)
{
	uint8_t rx = 0;

	for (int i = 0; i < len; i++) {
		/* busy-wait until at least one byte is available */
		while (reg_spi_fifo_state(peripheral_id) & FLD_SPI_RXF_EMPTY) {
		};
		/* data register is accessed byte-wise; i % 4 selects the byte lane */
		rx = reg_spi_wr_rd_data(peripheral_id, i % 4);

		/* store only while an RX buffer is active; otherwise discard */
		if (spi_context_rx_buf_on(ctx)) {
			*ctx->rx_buf = rx;
		}
		spi_context_update_rx(ctx, 1, 1);
	}
}
186 
187 /* SPI transceive internal */
/*
 * SPI transceive internal: full-duplex transfer of 'len' bytes, driven in
 * chunks of at most SPI_WR_RD_CHUNK_SIZE_MAX bytes so the writes and reads
 * fit the hardware FIFOs. For transfers longer than one chunk, the first
 * chunk reads one byte fewer than it writes and the last chunk reads one
 * byte more, keeping one byte "in flight" between TX and RX across chunk
 * boundaries (presumably to avoid an RX FIFO stall — behavior follows the
 * arithmetic below). Runs from the RAM code section.
 */
_attribute_ram_code_sec_
static void spi_b91_txrx(const struct device *dev, uint32_t len)
{
	unsigned int chunk_size = SPI_WR_RD_CHUNK_SIZE_MAX;
	struct spi_b91_cfg *cfg = SPI_CFG(dev);
	struct spi_context *ctx = &SPI_DATA(dev)->ctx;

	/* prepare SPI module: simultaneous write+read, no command phase */
	spi_set_transmode(cfg->peripheral_id, SPI_MODE_WRITE_AND_READ);
	spi_set_cmd(cfg->peripheral_id, 0);
	spi_tx_cnt(cfg->peripheral_id, len);
	spi_rx_cnt(cfg->peripheral_id, len);

	/* write and read bytes in chunks */
	for (int i = 0; i < len; i = i + chunk_size) {
		/* last chunk may be shorter than SPI_WR_RD_CHUNK_SIZE_MAX */
		if (chunk_size > (len - i)) {
			chunk_size = len - i;
		}

		/* write bytes */
		spi_b91_tx(cfg->peripheral_id, ctx, chunk_size);

		/* read bytes */
		if (len <= SPI_WR_RD_CHUNK_SIZE_MAX) {
			/* single-chunk transfer: read exactly what was sent */
			spi_b91_rx(cfg->peripheral_id, ctx, chunk_size);
		} else if (i == 0) {
			/* head, read 1 byte less than is sent */
			spi_b91_rx(cfg->peripheral_id, ctx, chunk_size - 1);
		} else if ((len - i) > SPI_WR_RD_CHUNK_SIZE_MAX) {
			/* body, read as many bytes as were sent */
			spi_b91_rx(cfg->peripheral_id, ctx, chunk_size);
		} else {
			/* tail, read the remaining byte carried over from the head */
			spi_b91_rx(cfg->peripheral_id, ctx, chunk_size + 1);
		}

		/* clear TX and RX fifo between chunks */
		BM_SET(reg_spi_fifo_state(cfg->peripheral_id), FLD_SPI_TXF_CLR);
		BM_SET(reg_spi_fifo_state(cfg->peripheral_id), FLD_SPI_RXF_CLR);
	}

	/* busy-wait until the SPI module finishes shifting */
	while (spi_is_busy(cfg->peripheral_id)) {
	};

	/* signal completion to any waiter on the context */
	spi_context_complete(ctx, dev, 0);
}
238 
239 /* Check for supported configuration */
spi_b91_is_config_supported(const struct spi_config * config,struct spi_b91_cfg * b91_config)240 static bool spi_b91_is_config_supported(const struct spi_config *config,
241 					struct spi_b91_cfg *b91_config)
242 {
243 	if (config->operation & SPI_HALF_DUPLEX) {
244 		LOG_ERR("Half-duplex not supported");
245 		return false;
246 	}
247 
248 	/* check for loop back */
249 	if (config->operation & SPI_MODE_LOOP) {
250 		LOG_ERR("Loop back mode not supported");
251 		return false;
252 	}
253 
254 	/* check for transfer LSB first */
255 	if (config->operation & SPI_TRANSFER_LSB) {
256 		LOG_ERR("LSB first not supported");
257 		return false;
258 	}
259 
260 	/* check word size */
261 	if (SPI_WORD_SIZE_GET(config->operation) != SPI_WORD_SIZE) {
262 		LOG_ERR("Word size must be %d", SPI_WORD_SIZE);
263 		return false;
264 	}
265 
266 	/* check for CS active high */
267 	if (config->operation & SPI_CS_ACTIVE_HIGH) {
268 		LOG_ERR("CS active high not supported for HW flow control");
269 		return false;
270 	}
271 
272 	/* check for lines configuration */
273 	if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES)) {
274 		if ((config->operation & SPI_LINES_MASK) == SPI_LINES_OCTAL) {
275 			LOG_ERR("SPI lines Octal is not supported");
276 			return false;
277 		} else if (((config->operation & SPI_LINES_MASK) ==
278 			    SPI_LINES_QUAD) &&
279 			   (b91_config->peripheral_id == PSPI_MODULE)) {
280 			LOG_ERR("SPI lines Quad is not supported by PSPI");
281 			return false;
282 		}
283 	}
284 
285 	/* check for slave configuration */
286 	if (SPI_OP_MODE_GET(config->operation) == SPI_OP_MODE_SLAVE) {
287 		LOG_ERR("SPI Slave is not implemented");
288 		return -ENOTSUP;
289 	}
290 
291 	return true;
292 }
293 
294 /* SPI configuration */
spi_b91_config(const struct device * dev,const struct spi_config * config)295 static int spi_b91_config(const struct device *dev,
296 			  const struct spi_config *config)
297 {
298 	int status = 0;
299 	spi_mode_type_e mode = SPI_MODE0;
300 	struct spi_b91_cfg *b91_config = SPI_CFG(dev);
301 	struct spi_b91_data *b91_data = SPI_DATA(dev);
302 	uint8_t clk_src = b91_config->peripheral_id == PSPI_MODULE ? sys_clk.pclk : sys_clk.hclk;
303 
304 	/* check for unsupported configuration */
305 	if (!spi_b91_is_config_supported(config, b91_config)) {
306 		return -ENOTSUP;
307 	}
308 
309 	/* config slave selection (CS): hw or sw */
310 	if (!spi_b91_config_cs(dev, config)) {
311 		return -ENOTSUP;
312 	}
313 
314 	/* get SPI mode */
315 	if (((config->operation & SPI_MODE_CPHA) == 0) &&
316 	    ((config->operation & SPI_MODE_CPOL) == 0)) {
317 		mode = SPI_MODE0;
318 	} else if (((config->operation & SPI_MODE_CPHA) == 0) &&
319 		   ((config->operation & SPI_MODE_CPOL) == SPI_MODE_CPOL)) {
320 		mode = SPI_MODE1;
321 	} else if (((config->operation & SPI_MODE_CPHA) == SPI_MODE_CPHA) &&
322 		   ((config->operation & SPI_MODE_CPOL) == 0)) {
323 		mode = SPI_MODE2;
324 	} else if (((config->operation & SPI_MODE_CPHA) == SPI_MODE_CPHA) &&
325 		   ((config->operation & SPI_MODE_CPOL) == SPI_MODE_CPOL)) {
326 		mode = SPI_MODE3;
327 	}
328 
329 	/* init SPI master */
330 	spi_master_init(b91_config->peripheral_id,
331 			clk_src * 1000000 / (2 * config->frequency) - 1, mode);
332 	spi_master_config(b91_config->peripheral_id, SPI_NOMAL);
333 
334 	/* set lines configuration */
335 	if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES)) {
336 		uint32_t lines = config->operation & SPI_LINES_MASK;
337 
338 		if (lines == SPI_LINES_SINGLE) {
339 			spi_set_io_mode(b91_config->peripheral_id,
340 					SPI_SINGLE_MODE);
341 		} else if (lines == SPI_LINES_DUAL) {
342 			spi_set_io_mode(b91_config->peripheral_id,
343 					SPI_DUAL_MODE);
344 		} else if (lines == SPI_LINES_QUAD) {
345 			spi_set_io_mode(b91_config->peripheral_id,
346 					HSPI_QUAD_MODE);
347 		}
348 	}
349 
350 	/* configure pins */
351 	status = pinctrl_apply_state(b91_config->pcfg, PINCTRL_STATE_DEFAULT);
352 	if (status < 0) {
353 		LOG_ERR("Failed to configure SPI pins");
354 		return status;
355 	}
356 
357 	/* save context config */
358 	b91_data->ctx.config = config;
359 
360 	return 0;
361 }
362 
363 /* API implementation: init */
spi_b91_init(const struct device * dev)364 static int spi_b91_init(const struct device *dev)
365 {
366 	int err;
367 	struct spi_b91_data *data = SPI_DATA(dev);
368 
369 	err = spi_context_cs_configure_all(&data->ctx);
370 	if (err < 0) {
371 		return err;
372 	}
373 
374 	spi_context_unlock_unconditionally(&data->ctx);
375 
376 	return 0;
377 }
378 
379 /* API implementation: transceive */
/*
 * API implementation: transceive — synchronous full-duplex transfer.
 * Applies the configuration, locks the context, drives the (optional)
 * software CS line around the transfer and returns the completion status.
 */
static int spi_b91_transceive(const struct device *dev,
			      const struct spi_config *config,
			      const struct spi_buf_set *tx_bufs,
			      const struct spi_buf_set *rx_bufs)
{
	int status = 0;
	struct spi_b91_data *data = SPI_DATA(dev);
	/* transfer length is the larger of total TX and total RX bytes */
	uint32_t txrx_len = spi_b91_get_txrx_len(tx_bufs, rx_bufs);

	/* set configuration */
	/* NOTE(review): configuration happens before the context lock is
	 * taken — confirm concurrent callers cannot race here.
	 */
	status = spi_b91_config(dev, config);
	if (status) {
		return status;
	}

	/* context setup */
	spi_context_lock(&data->ctx, false, NULL, NULL, config);
	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	/* if cs is defined: software cs control, set active true */
	if (spi_cs_is_gpio(config)) {
		spi_context_cs_control(&data->ctx, true);
	}

	/* transceive data */
	spi_b91_txrx(dev, txrx_len);

	/* if cs is defined: software cs control, set active false */
	if (spi_cs_is_gpio(config)) {
		spi_context_cs_control(&data->ctx, false);
	}

	/* wait for completion signalled by spi_b91_txrx, then release context */
	status = spi_context_wait_for_completion(&data->ctx);
	spi_context_release(&data->ctx, status);

	return status;
}
418 
#ifdef CONFIG_SPI_ASYNC
/* API implementation: transceive_async — not implemented for this controller */
static int spi_b91_transceive_async(const struct device *dev,
				    const struct spi_config *config,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs,
				    spi_callback_t cb,
				    void *userdata)
{
	/* silence unused-parameter warnings */
	(void)dev;
	(void)config;
	(void)tx_bufs;
	(void)rx_bufs;
	(void)cb;
	(void)userdata;

	return -ENOTSUP;
}
#endif /* CONFIG_SPI_ASYNC */
438 
439 /* API implementation: release */
spi_b91_release(const struct device * dev,const struct spi_config * config)440 static int spi_b91_release(const struct device *dev,
441 			   const struct spi_config *config)
442 {
443 	struct spi_b91_data *data = SPI_DATA(dev);
444 
445 	if (!spi_context_configured(&data->ctx, config)) {
446 		return -EINVAL;
447 	}
448 
449 	spi_context_unlock_unconditionally(&data->ctx);
450 
451 	return 0;
452 }
453 
/* SPI driver APIs structure: binds this driver's entry points to the
 * Zephyr SPI driver API; the async entry is compiled in only when
 * CONFIG_SPI_ASYNC is enabled.
 */
static const struct spi_driver_api spi_b91_api = {
	.transceive = spi_b91_transceive,
	.release = spi_b91_release,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_b91_transceive_async,
#endif /* CONFIG_SPI_ASYNC */
};
462 
/* SPI driver registration.
 * For each enabled devicetree instance this macro defines:
 *  - the pinctrl configuration,
 *  - the driver data (spi_context with lock/sync/CS-GPIO initializers),
 *  - the driver config (peripheral id, cs0..cs2 pin tokens, pinctrl ref),
 * and registers the device with spi_b91_init at POST_KERNEL level.
 * A cs pin token of 0 means "not defined" (see spi_b91_config_cs).
 */
#define SPI_B91_INIT(inst)						  \
									  \
	PINCTRL_DT_INST_DEFINE(inst);					  \
									  \
	static struct spi_b91_data spi_b91_data_##inst = {		  \
		SPI_CONTEXT_INIT_LOCK(spi_b91_data_##inst, ctx),	  \
		SPI_CONTEXT_INIT_SYNC(spi_b91_data_##inst, ctx),	  \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(inst), ctx)	  \
	};								  \
									  \
	static struct spi_b91_cfg spi_b91_cfg_##inst = {		  \
		.peripheral_id = DT_INST_ENUM_IDX(inst, peripheral_id),	  \
		.cs_pin[0] = DT_INST_STRING_TOKEN(inst, cs0_pin),	  \
		.cs_pin[1] = DT_INST_STRING_TOKEN(inst, cs1_pin),	  \
		.cs_pin[2] = DT_INST_STRING_TOKEN(inst, cs2_pin),	  \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst),		  \
	};								  \
									  \
	DEVICE_DT_INST_DEFINE(inst, spi_b91_init,			  \
			      NULL,					  \
			      &spi_b91_data_##inst,			  \
			      &spi_b91_cfg_##inst,			  \
			      POST_KERNEL,				  \
			      CONFIG_SPI_INIT_PRIORITY,			  \
			      &spi_b91_api);

/* Expand SPI_B91_INIT for every status = "okay" instance */
DT_INST_FOREACH_STATUS_OKAY(SPI_B91_INIT)
491