1 /*
2  * Copyright (c) 2024 Nuvoton Technology Corporation.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DT_DRV_COMPAT nuvoton_npcx_spip
8 
9 #include <zephyr/drivers/spi.h>
10 #include <zephyr/drivers/spi/rtio.h>
11 #include <zephyr/drivers/clock_control.h>
12 #include <zephyr/drivers/pinctrl.h>
13 #include <zephyr/kernel.h>
14 
15 #include <zephyr/logging/log.h>
16 LOG_MODULE_REGISTER(spi_npcx_spip, CONFIG_SPI_LOG_LEVEL);
17 
18 #include "spi_context.h"
19 
20 /* Transfer this NOP value when tx buf is null */
21 #define SPI_NPCX_SPIP_TX_NOP                 0x00
22 #define SPI_NPCX_SPIP_WAIT_STATUS_TIMEOUT_US 1000
23 
24 /* The max allowed prescaler divider */
25 #define SPI_NPCX_MAX_PRESCALER_DIV INT8_MAX
26 
/* Per-instance run-time data for the NPCX SPIP driver */
struct spi_npcx_spip_data {
	/* SPI context: lock/sync primitives, TX/RX buffer cursors, CS GPIO state */
	struct spi_context ctx;
	/* Frequency (Hz) of the clock source feeding the SPIP module; read at init */
	uint32_t src_clock_freq;
	/* Frame width in bytes: 1 for 8-bit frames, 2 for 16-bit; set by configure() */
	uint8_t bytes_per_frame;
};
32 
/* Per-instance constant configuration, filled from devicetree */
struct spi_npcx_spip_cfg {
	/* Base address of the SPIP register block */
	struct spip_reg *reg_base;
	/* Clock control handle used to gate and query the SPIP source clock */
	struct npcx_clk_cfg clk_cfg;
#ifdef CONFIG_SPI_NPCX_SPIP_INTERRUPT
	/* routine for configuring SPIP ISR */
	void (*irq_cfg_func)(const struct device *dev);
#endif
	/* Pin control configuration for the SPIP signals */
	const struct pinctrl_dev_config *pcfg;
};
42 
spi_npcx_spip_configure(const struct device * dev,const struct spi_config * spi_cfg)43 static int spi_npcx_spip_configure(const struct device *dev, const struct spi_config *spi_cfg)
44 {
45 	uint8_t prescaler_divider;
46 	const struct spi_npcx_spip_cfg *const config = dev->config;
47 	struct spi_npcx_spip_data *const data = dev->data;
48 	struct spip_reg *const reg_base = config->reg_base;
49 	spi_operation_t operation = spi_cfg->operation;
50 	uint8_t frame_size;
51 
52 	if (spi_context_configured(&data->ctx, spi_cfg)) {
53 		/* This configuration is already in use */
54 		return 0;
55 	}
56 
57 	if (operation & SPI_HALF_DUPLEX) {
58 		LOG_ERR("Half duplex mode is not supported");
59 		return -ENOTSUP;
60 	}
61 
62 	if (SPI_OP_MODE_GET(operation) != SPI_OP_MODE_MASTER) {
63 		LOG_ERR("Only SPI controller mode is supported");
64 		return -ENOTSUP;
65 	}
66 
67 	if (operation & SPI_MODE_LOOP) {
68 		LOG_ERR("Loopback mode is not supported");
69 		return -ENOTSUP;
70 	}
71 
72 	/*
73 	 * If the GPIO CS configuration is not present, return error because the hardware CS is
74 	 * not supported.
75 	 */
76 	if (!spi_cs_is_gpio(spi_cfg)) {
77 		LOG_ERR("Only GPIO CS is supported");
78 		return -ENOTSUP;
79 	}
80 
81 	/* Get the frame length */
82 	frame_size = SPI_WORD_SIZE_GET(operation);
83 	if (frame_size == 8) {
84 		data->bytes_per_frame = 1;
85 		reg_base->SPIP_CTL1 &= ~BIT(NPCX_SPIP_CTL1_MOD);
86 	} else if (frame_size == 16) {
87 		reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_MOD);
88 		data->bytes_per_frame = 2;
89 	} else {
90 		LOG_ERR("Only support word sizes either 8 or 16 bits");
91 		return -ENOTSUP;
92 	}
93 
94 	if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) &&
95 	    (operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
96 		LOG_ERR("Only single line mode is supported");
97 		return -ENOTSUP;
98 	}
99 
100 	/* Set the endianness */
101 	if (operation & SPI_TRANSFER_LSB) {
102 		LOG_ERR("Shift out with LSB is not supported");
103 		return -ENOTSUP;
104 	}
105 
106 	/*
107 	 * Set CPOL and CPHA.
108 	 * The following is how to map npcx spip control register to CPOL and CPHA
109 	 *   CPOL    CPHA  |  SCIDL    SCM
110 	 *   -----------------------------
111 	 *    0       0    |    0       0
112 	 *    0       1    |    0       1
113 	 *    1       0    |    1       1
114 	 *    1       1    |    1       0
115 	 */
116 	if (operation & SPI_MODE_CPOL) {
117 		reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_SCIDL);
118 	} else {
119 		reg_base->SPIP_CTL1 &= ~BIT(NPCX_SPIP_CTL1_SCIDL);
120 	}
121 	if (((operation & SPI_MODE_CPOL) == SPI_MODE_CPOL) !=
122 	    ((operation & SPI_MODE_CPHA) == SPI_MODE_CPHA)) {
123 		reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_SCM);
124 	} else {
125 		reg_base->SPIP_CTL1 &= ~BIT(NPCX_SPIP_CTL1_SCM);
126 	}
127 
128 	/* Set the SPI frequency */
129 	prescaler_divider = data->src_clock_freq / 2 / spi_cfg->frequency;
130 	if (prescaler_divider >= 1) {
131 		prescaler_divider -= 1;
132 	}
133 	if (prescaler_divider >= SPI_NPCX_MAX_PRESCALER_DIV) {
134 		LOG_ERR("SPI divider %d exceeds the max allowed value %d.", prescaler_divider,
135 			SPI_NPCX_MAX_PRESCALER_DIV);
136 		return -ENOTSUP;
137 	}
138 	SET_FIELD(reg_base->SPIP_CTL1, NPCX_SPIP_CTL1_SCDV, prescaler_divider);
139 
140 	data->ctx.config = spi_cfg;
141 
142 	return 0;
143 }
144 
spi_npcx_spip_process_tx_buf(struct spi_npcx_spip_data * const data,uint16_t * tx_frame)145 static void spi_npcx_spip_process_tx_buf(struct spi_npcx_spip_data *const data, uint16_t *tx_frame)
146 {
147 	/* Get the tx_frame from tx_buf only when tx_buf != NULL */
148 	if (spi_context_tx_buf_on(&data->ctx)) {
149 		if (data->bytes_per_frame == 1) {
150 			*tx_frame = UNALIGNED_GET((uint8_t *)(data->ctx.tx_buf));
151 		} else {
152 			*tx_frame = UNALIGNED_GET((uint16_t *)(data->ctx.tx_buf));
153 		}
154 	}
155 	/*
156 	 * The update is ignored if TX is off (tx_len == 0).
157 	 * Note: if tx_buf == NULL && tx_len != 0, the update still counts.
158 	 */
159 	spi_context_update_tx(&data->ctx, data->bytes_per_frame, 1);
160 }
161 
spi_npcx_spip_process_rx_buf(struct spi_npcx_spip_data * const data,uint16_t rx_frame)162 static void spi_npcx_spip_process_rx_buf(struct spi_npcx_spip_data *const data, uint16_t rx_frame)
163 {
164 	if (spi_context_rx_buf_on(&data->ctx)) {
165 		if (data->bytes_per_frame == 1) {
166 			UNALIGNED_PUT(rx_frame, (uint8_t *)data->ctx.rx_buf);
167 		} else {
168 			UNALIGNED_PUT(rx_frame, (uint16_t *)data->ctx.rx_buf);
169 		}
170 	}
171 	spi_context_update_rx(&data->ctx, data->bytes_per_frame, 1);
172 }
173 
174 #ifndef CONFIG_SPI_NPCX_SPIP_INTERRUPT
/*
 * Transfer a single frame in polling mode: load the next TX frame (or NOP),
 * wait for the shifter to go idle, write the frame, wait for the RX buffer
 * full flag, then read back and store the received frame.
 *
 * Returns 0 on success or -ETIMEDOUT if a status flag does not settle within
 * SPI_NPCX_SPIP_WAIT_STATUS_TIMEOUT_US.
 */
static int spi_npcx_spip_xfer_frame(const struct device *dev)
{
	const struct spi_npcx_spip_cfg *const config = dev->config;
	struct spip_reg *const reg_base = config->reg_base;
	struct spi_npcx_spip_data *const data = dev->data;
	uint16_t tx_frame = SPI_NPCX_SPIP_TX_NOP;
	uint16_t rx_frame;

	/* Fetch the next TX frame; tx_frame stays NOP when the TX buffer is off */
	spi_npcx_spip_process_tx_buf(data, &tx_frame);

	/* Wait until the controller is no longer busy before writing new data */
	if (WAIT_FOR(!IS_BIT_SET(reg_base->SPIP_STAT, NPCX_SPIP_STAT_BSY),
		     SPI_NPCX_SPIP_WAIT_STATUS_TIMEOUT_US, NULL) == false) {
		LOG_ERR("Check Status BSY Timeout");
		return -ETIMEDOUT;
	}

	/* Writing the data register starts the frame transfer */
	reg_base->SPIP_DATA = tx_frame;

	/* Wait until the received frame is available (RX buffer full) */
	if (WAIT_FOR(IS_BIT_SET(reg_base->SPIP_STAT, NPCX_SPIP_STAT_RBF),
		     SPI_NPCX_SPIP_WAIT_STATUS_TIMEOUT_US, NULL) == false) {
		LOG_ERR("Check Status RBF Timeout");
		return -ETIMEDOUT;
	}

	rx_frame = reg_base->SPIP_DATA;
	spi_npcx_spip_process_rx_buf(data, rx_frame);

	return 0;
}
204 #endif
205 
spi_npcx_spip_transfer_ongoing(struct spi_npcx_spip_data * data)206 static bool spi_npcx_spip_transfer_ongoing(struct spi_npcx_spip_data *data)
207 {
208 	return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx);
209 }
210 
211 #ifdef CONFIG_SPI_NPCX_SPIP_INTERRUPT
/*
 * SPIP interrupt service routine (interrupt-driven transfer mode).
 *
 * Two events are handled:
 *  - Neither BSY nor RBF set: the controller is idle with nothing received
 *    yet (first frame). Disable the write interrupt and load the first TX
 *    frame to start the transfer.
 *  - RBF set: a frame has been received. Store it; if frames remain, write
 *    the next TX frame, otherwise disable the read interrupt, de-assert CS
 *    and complete the transfer.
 */
static void spi_npcx_spip_isr(const struct device *dev)
{
	const struct spi_npcx_spip_cfg *const config = dev->config;
	struct spip_reg *const reg_base = config->reg_base;
	struct spi_npcx_spip_data *const data = dev->data;
	struct spi_context *ctx = &data->ctx;
	uint16_t tx_frame = SPI_NPCX_SPIP_TX_NOP;
	uint16_t rx_frame;
	uint8_t status;

	status = reg_base->SPIP_STAT;

	if (!IS_BIT_SET(status, NPCX_SPIP_STAT_BSY) && !IS_BIT_SET(status, NPCX_SPIP_STAT_RBF)) {
		/* Kick off the transfer: write interrupt is one-shot, disable it */
		reg_base->SPIP_CTL1 &= ~BIT(NPCX_SPIP_CTL1_EIW);

		spi_npcx_spip_process_tx_buf(data, &tx_frame);
		reg_base->SPIP_DATA = tx_frame;
	} else if (IS_BIT_SET(status, NPCX_SPIP_STAT_RBF)) {
		rx_frame = reg_base->SPIP_DATA;

		spi_npcx_spip_process_rx_buf(data, rx_frame);

		if (!spi_npcx_spip_transfer_ongoing(data)) {
			reg_base->SPIP_CTL1 &= ~BIT(NPCX_SPIP_CTL1_EIR);
			/*
			 * The CS might not de-assert if SPI_HOLD_ON_CS is configured.
			 * In this case, CS de-assertion relies on the caller to explicitly call
			 * spi_release() API.
			 */
			spi_context_cs_control(ctx, false);

			spi_context_complete(ctx, dev, 0);

		} else {
			/* More frames to go: write the next one to keep the pipeline moving */
			spi_npcx_spip_process_tx_buf(data, &tx_frame);
			reg_base->SPIP_DATA = tx_frame;
		}
	}
}
251 #endif
252 
/*
 * Common transfer entry point for the sync and async APIs.
 *
 * Locks the context, applies the configuration, sets up the buffers,
 * flushes stale RX data, asserts CS and runs the transfer either
 * interrupt-driven or by polling frame-by-frame.
 *
 * Returns 0 on success or a negative errno from configuration or the
 * transfer itself.
 */
static int transceive(const struct device *dev, const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
		      bool asynchronous, spi_callback_t cb, void *userdata)
{
	const struct spi_npcx_spip_cfg *const config = dev->config;
	struct spip_reg *const reg_base = config->reg_base;
	struct spi_npcx_spip_data *const data = dev->data;
	struct spi_context *ctx = &data->ctx;
	int rc;

	if (!tx_bufs && !rx_bufs) {
		return 0;
	}

#ifndef CONFIG_SPI_NPCX_SPIP_INTERRUPT
	/* Async completion requires the interrupt-driven build */
	if (asynchronous) {
		return -ENOTSUP;
	}
#endif

	/* Lock the SPI Context */
	spi_context_lock(ctx, asynchronous, cb, userdata, spi_cfg);

	rc = spi_npcx_spip_configure(dev, spi_cfg);
	if (rc < 0) {
		spi_context_release(ctx, rc);
		return rc;
	}

	spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, data->bytes_per_frame);
	if (!spi_npcx_spip_transfer_ongoing(data)) {
		/* Nothing to transfer (all buffer lengths are zero) */
		spi_context_release(ctx, 0);
		return 0;
	}

	/* Enable SPIP module */
	reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_SPIEN);

	/* Cleaning junk data in the buffer */
	while (IS_BIT_SET(reg_base->SPIP_STAT, NPCX_SPIP_STAT_RBF)) {
		uint8_t unused __attribute__((unused));

		unused = reg_base->SPIP_DATA;
	}

	/* Assert the CS line */
	spi_context_cs_control(ctx, true);

#ifdef CONFIG_SPI_NPCX_SPIP_INTERRUPT
	reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_EIR) | BIT(NPCX_SPIP_CTL1_EIW);
	rc = spi_context_wait_for_completion(&data->ctx);
	if (rc < 0) {
		/*
		 * On timeout or error, stop further SPIP interrupts and de-assert
		 * CS so a late IRQ cannot complete a context the caller has
		 * already abandoned.
		 */
		reg_base->SPIP_CTL1 &= ~(BIT(NPCX_SPIP_CTL1_EIR) | BIT(NPCX_SPIP_CTL1_EIW));
		spi_context_cs_control(ctx, false);
	}
#else
	do {
		rc = spi_npcx_spip_xfer_frame(dev);
		if (rc < 0) {
			break;
		}
	} while (spi_npcx_spip_transfer_ongoing(data));

	/*
	 * The CS might not de-assert if SPI_HOLD_ON_CS is configured.
	 * In this case, CS de-assertion relies on the caller to explicitly call
	 * spi_release() API.
	 */
	spi_context_cs_control(ctx, false);

#endif
	spi_context_release(ctx, rc);

	return rc;
}
323 
spi_npcx_spip_transceive(const struct device * dev,const struct spi_config * spi_cfg,const struct spi_buf_set * tx_bufs,const struct spi_buf_set * rx_bufs)324 static int spi_npcx_spip_transceive(const struct device *dev, const struct spi_config *spi_cfg,
325 				    const struct spi_buf_set *tx_bufs,
326 				    const struct spi_buf_set *rx_bufs)
327 {
328 	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
329 }
330 
331 #ifdef CONFIG_SPI_ASYNC
/* Asynchronous SPI transfer entry point: completion is signalled via @a cb. */
static int spi_npcx_spip_transceive_async(const struct device *dev,
					  const struct spi_config *spi_cfg,
					  const struct spi_buf_set *tx_bufs,
					  const struct spi_buf_set *rx_bufs, spi_callback_t cb,
					  void *userdata)
{
	/* Non-blocking call: hand the callback and user data to the common path */
	int rc = transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);

	return rc;
}
340 #endif
341 
spi_npcx_spip_release(const struct device * dev,const struct spi_config * spi_cfg)342 static int spi_npcx_spip_release(const struct device *dev, const struct spi_config *spi_cfg)
343 {
344 	struct spi_npcx_spip_data *const data = dev->data;
345 	struct spi_context *ctx = &data->ctx;
346 
347 	if (!spi_context_configured(ctx, spi_cfg)) {
348 		return -EINVAL;
349 	}
350 
351 	spi_context_unlock_unconditionally(ctx);
352 
353 	return 0;
354 }
355 
/*
 * Device init: turn on the SPIP source clock, record its rate (used later to
 * derive the prescaler), configure the CS GPIOs and pins, unlock the context,
 * hook up the ISR (interrupt build only) and enable the module.
 *
 * Returns 0 on success or a negative errno from any failing step.
 */
static int spi_npcx_spip_init(const struct device *dev)
{
	int ret;
	struct spi_npcx_spip_data *const data = dev->data;
	const struct spi_npcx_spip_cfg *const config = dev->config;
	struct spip_reg *const reg_base = config->reg_base;
	const struct device *const clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE);

	if (!device_is_ready(clk_dev)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	/* Gate on the SPIP clock before touching any register */
	ret = clock_control_on(clk_dev, (clock_control_subsys_t)&config->clk_cfg);
	if (ret < 0) {
		LOG_ERR("Turn on SPIP clock fail %d", ret);
		return ret;
	}

	/* Cache the source clock rate; configure() divides it for the SPI clock */
	ret = clock_control_get_rate(clk_dev, (clock_control_subsys_t)&config->clk_cfg,
				     &data->src_clock_freq);
	if (ret < 0) {
		LOG_ERR("Get SPIP clock source rate error %d", ret);
		return ret;
	}

	ret = spi_context_cs_configure_all(&data->ctx);
	if (ret < 0) {
		return ret;
	}

	ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		return ret;
	}

	/* Make sure the context is unlocked */
	spi_context_unlock_unconditionally(&data->ctx);

#ifdef CONFIG_SPI_NPCX_SPIP_INTERRUPT
	config->irq_cfg_func(dev);
#endif

	/* Enable SPIP module */
	reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_SPIEN);

	return 0;
}
404 
/* SPI driver API vtable registered with the Zephyr SPI subsystem */
static DEVICE_API(spi, spi_npcx_spip_api) = {
	.transceive = spi_npcx_spip_transceive,
	.release = spi_npcx_spip_release,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_npcx_spip_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	/* Use the generic blocking fallback for RTIO submissions */
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
};
415 
#ifdef CONFIG_SPI_NPCX_SPIP_INTERRUPT
/*
 * Per-instance IRQ setup routine: connects the SPIP ISR to the instance's
 * interrupt line and enables it. Invoked from init via config->irq_cfg_func.
 */
#define NPCX_SPIP_IRQ_HANDLER(n)                                                                   \
	static void spi_npcx_spip_irq_cfg_func_##n(const struct device *dev)                       \
	{                                                                                          \
		IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), spi_npcx_spip_isr,          \
			    DEVICE_DT_INST_GET(n), 0);                                             \
		irq_enable(DT_INST_IRQN(n));                                                       \
	}

/* Config-struct initializer fragment wiring in the IRQ setup routine */
#define NPCX_SPIP_IRQ_HANDLER_FUNC(n) .irq_cfg_func = spi_npcx_spip_irq_cfg_func_##n,
#else
/* Polling build: no ISR to hook up */
#define NPCX_SPIP_IRQ_HANDLER_FUNC(n)
#define NPCX_SPIP_IRQ_HANDLER(n)
#endif
430 
/*
 * Instantiate one driver instance per enabled devicetree node: pinctrl
 * config, optional IRQ setup, run-time data (SPI context with lock/sync
 * primitives and CS GPIOs), constant config (register base, clock, pins)
 * and the SPI device definition itself.
 */
#define NPCX_SPI_INIT(n)                                                                           \
	PINCTRL_DT_INST_DEFINE(n);                                                                 \
	NPCX_SPIP_IRQ_HANDLER(n)                                                                   \
                                                                                                   \
	static struct spi_npcx_spip_data spi_npcx_spip_data_##n = {                                \
		SPI_CONTEXT_INIT_LOCK(spi_npcx_spip_data_##n, ctx),                                \
		SPI_CONTEXT_INIT_SYNC(spi_npcx_spip_data_##n, ctx),                                \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)};                             \
                                                                                                   \
	static struct spi_npcx_spip_cfg spi_npcx_spip_cfg_##n = {                                  \
		.reg_base = (struct spip_reg *)DT_INST_REG_ADDR(n),                                \
		.clk_cfg = NPCX_DT_CLK_CFG_ITEM(n),                                                \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),                                         \
		NPCX_SPIP_IRQ_HANDLER_FUNC(n)};                                                    \
                                                                                                   \
	SPI_DEVICE_DT_INST_DEFINE(n, spi_npcx_spip_init, NULL, &spi_npcx_spip_data_##n,            \
			      &spi_npcx_spip_cfg_##n, POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,       \
			      &spi_npcx_spip_api);

DT_INST_FOREACH_STATUS_OKAY(NPCX_SPI_INIT)
451