/*
 * Copyright (c) 2024 Nuvoton Technology Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nuvoton_npcx_spip

#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/kernel.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_npcx_spip, CONFIG_SPI_LOG_LEVEL);

#include "spi_context.h"

/* Transfer this NOP value when tx buf is null */
#define SPI_NPCX_SPIP_TX_NOP                 0x00
#define SPI_NPCX_SPIP_WAIT_STATUS_TIMEOUT_US 1000

/* The max allowed prescaler divider */
#define SPI_NPCX_MAX_PRESCALER_DIV INT8_MAX

struct spi_npcx_spip_data {
	struct spi_context ctx;
	uint32_t src_clock_freq;
	uint8_t bytes_per_frame;
};

struct spi_npcx_spip_cfg {
	struct spip_reg *reg_base;
	struct npcx_clk_cfg clk_cfg;
#ifdef CONFIG_SPI_NPCX_SPIP_INTERRUPT
	/* routine for configuring SPIP ISR */
	void (*irq_cfg_func)(const struct device *dev);
#endif
	const struct pinctrl_dev_config *pcfg;
};

static int spi_npcx_spip_configure(const struct device *dev, const struct spi_config *spi_cfg)
{
	uint8_t prescaler_divider;
	const struct spi_npcx_spip_cfg *const config = dev->config;
	struct spi_npcx_spip_data *const data = dev->data;
	struct spip_reg *const reg_base = config->reg_base;
	spi_operation_t operation = spi_cfg->operation;
	uint8_t frame_size;

	if (spi_context_configured(&data->ctx, spi_cfg)) {
		/* This configuration is already in use */
		return 0;
	}

	if (operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half duplex mode is not supported");
		return -ENOTSUP;
	}

	if (SPI_OP_MODE_GET(operation) != SPI_OP_MODE_MASTER) {
		LOG_ERR("Only SPI controller mode is supported");
		return -ENOTSUP;
	}

	if (operation & SPI_MODE_LOOP) {
		LOG_ERR("Loopback mode is not supported");
		return -ENOTSUP;
	}
	/*
	 * If the GPIO CS configuration is not present, return an error because hardware CS is
	 * not supported.
	 */
	if (!spi_cs_is_gpio(spi_cfg)) {
		LOG_ERR("Only GPIO CS is supported");
		return -ENOTSUP;
	}

	/* Get the frame size */
	frame_size = SPI_WORD_SIZE_GET(operation);
	if (frame_size == 8) {
		data->bytes_per_frame = 1;
		reg_base->SPIP_CTL1 &= ~BIT(NPCX_SPIP_CTL1_MOD);
	} else if (frame_size == 16) {
		reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_MOD);
		data->bytes_per_frame = 2;
	} else {
		LOG_ERR("Only 8 or 16 bit word sizes are supported");
		return -ENOTSUP;
	}

	if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) &&
	    (operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
		LOG_ERR("Only single line mode is supported");
		return -ENOTSUP;
	}

	/* Only MSB first bit order is supported */
	if (operation & SPI_TRANSFER_LSB) {
		LOG_ERR("Shift out with LSB first is not supported");
		return -ENOTSUP;
	}

	/*
	 * Set CPOL and CPHA.
	 * CPOL and CPHA map to the NPCX SPIP control register bits as follows:
	 *   CPOL    CPHA  |  SCIDL    SCM
	 *   -----------------------------
	 *    0       0    |    0       0
	 *    0       1    |    0       1
	 *    1       0    |    1       1
	 *    1       1    |    1       0
	 */
	if (operation & SPI_MODE_CPOL) {
		reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_SCIDL);
	} else {
		reg_base->SPIP_CTL1 &= ~BIT(NPCX_SPIP_CTL1_SCIDL);
	}
	if (((operation & SPI_MODE_CPOL) == SPI_MODE_CPOL) !=
	    ((operation & SPI_MODE_CPHA) == SPI_MODE_CPHA)) {
		reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_SCM);
	} else {
		reg_base->SPIP_CTL1 &= ~BIT(NPCX_SPIP_CTL1_SCM);
	}

	/* Set the SPI frequency */
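	/*
	 * The driver programs SCDV so that SCK = src_clock_freq / (2 * (SCDV + 1)); requests
	 * that would need a divider larger than SPI_NPCX_MAX_PRESCALER_DIV are rejected.
	 */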
	prescaler_divider = data->src_clock_freq / 2 / spi_cfg->frequency;
	if (prescaler_divider >= 1) {
		prescaler_divider -= 1;
	}
	if (prescaler_divider >= SPI_NPCX_MAX_PRESCALER_DIV) {
		LOG_ERR("SPI divider %d exceeds the max allowed value %d.", prescaler_divider,
			SPI_NPCX_MAX_PRESCALER_DIV);
		return -ENOTSUP;
	}
	SET_FIELD(reg_base->SPIP_CTL1, NPCX_SPIP_CTL1_SCDV, prescaler_divider);

	data->ctx.config = spi_cfg;

	return 0;
}

static void spi_npcx_spip_process_tx_buf(struct spi_npcx_spip_data *const data, uint16_t *tx_frame)
{
	/* Get the tx_frame from tx_buf only when tx_buf != NULL */
	if (spi_context_tx_buf_on(&data->ctx)) {
		if (data->bytes_per_frame == 1) {
			*tx_frame = UNALIGNED_GET((uint8_t *)(data->ctx.tx_buf));
		} else {
			*tx_frame = UNALIGNED_GET((uint16_t *)(data->ctx.tx_buf));
		}
	}
	/*
	 * The update is ignored if TX is off (tx_len == 0).
	 * Note: if tx_buf == NULL && tx_len != 0, the update still counts.
	 */
	spi_context_update_tx(&data->ctx, data->bytes_per_frame, 1);
}
static void spi_npcx_spip_process_rx_buf(struct spi_npcx_spip_data *const data, uint16_t rx_frame)
{
	if (spi_context_rx_buf_on(&data->ctx)) {
		if (data->bytes_per_frame == 1) {
			UNALIGNED_PUT(rx_frame, (uint8_t *)data->ctx.rx_buf);
		} else {
			UNALIGNED_PUT(rx_frame, (uint16_t *)data->ctx.rx_buf);
		}
	}
	spi_context_update_rx(&data->ctx, data->bytes_per_frame, 1);
}

#ifndef CONFIG_SPI_NPCX_SPIP_INTERRUPT
static int spi_npcx_spip_xfer_frame(const struct device *dev)
{
	const struct spi_npcx_spip_cfg *const config = dev->config;
	struct spip_reg *const reg_base = config->reg_base;
	struct spi_npcx_spip_data *const data = dev->data;
	uint16_t tx_frame = SPI_NPCX_SPIP_TX_NOP;
	uint16_t rx_frame;

	spi_npcx_spip_process_tx_buf(data, &tx_frame);

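	/* Wait until the controller is no longer busy before loading the TX frame */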
	if (WAIT_FOR(!IS_BIT_SET(reg_base->SPIP_STAT, NPCX_SPIP_STAT_BSY),
		     SPI_NPCX_SPIP_WAIT_STATUS_TIMEOUT_US, NULL) == false) {
		LOG_ERR("Timed out waiting for the BSY flag to clear");
		return -ETIMEDOUT;
	}

	reg_base->SPIP_DATA = tx_frame;

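	/* Wait for the received frame to be available in the data register */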
	if (WAIT_FOR(IS_BIT_SET(reg_base->SPIP_STAT, NPCX_SPIP_STAT_RBF),
		     SPI_NPCX_SPIP_WAIT_STATUS_TIMEOUT_US, NULL) == false) {
		LOG_ERR("Timed out waiting for the RBF flag to be set");
		return -ETIMEDOUT;
	}

	rx_frame = reg_base->SPIP_DATA;
	spi_npcx_spip_process_rx_buf(data, rx_frame);

	return 0;
}
#endif

static bool spi_npcx_spip_transfer_ongoing(struct spi_npcx_spip_data *data)
{
	return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx);
}

#ifdef CONFIG_SPI_NPCX_SPIP_INTERRUPT
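/*
 * ISR flow: when neither BSY nor RBF is set, the first TX frame is loaded and the write
 * interrupt (EIW) is disabled so it only fires once. When RBF is set, the received frame is
 * drained; the ISR then either loads the next TX frame or, once both buffers are exhausted,
 * disables the read interrupt (EIR), de-asserts CS, and completes the transfer.
 */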
static void spi_npcx_spip_isr(const struct device *dev)
{
	const struct spi_npcx_spip_cfg *const config = dev->config;
	struct spip_reg *const reg_base = config->reg_base;
	struct spi_npcx_spip_data *const data = dev->data;
	struct spi_context *ctx = &data->ctx;
	uint16_t tx_frame = SPI_NPCX_SPIP_TX_NOP;
	uint16_t rx_frame;
	uint8_t status;

	status = reg_base->SPIP_STAT;

	if (!IS_BIT_SET(status, NPCX_SPIP_STAT_BSY) && !IS_BIT_SET(status, NPCX_SPIP_STAT_RBF)) {
		reg_base->SPIP_CTL1 &= ~BIT(NPCX_SPIP_CTL1_EIW);

		spi_npcx_spip_process_tx_buf(data, &tx_frame);
		reg_base->SPIP_DATA = tx_frame;
	} else if (IS_BIT_SET(status, NPCX_SPIP_STAT_RBF)) {
		rx_frame = reg_base->SPIP_DATA;

		spi_npcx_spip_process_rx_buf(data, rx_frame);

		if (!spi_npcx_spip_transfer_ongoing(data)) {
			reg_base->SPIP_CTL1 &= ~BIT(NPCX_SPIP_CTL1_EIR);
			/*
			 * The CS might not de-assert if SPI_HOLD_ON_CS is configured.
			 * In this case, CS de-assertion relies on the caller explicitly
			 * calling the spi_release() API.
			 */
			spi_context_cs_control(ctx, false);

			spi_context_complete(ctx, dev, 0);

		} else {
			spi_npcx_spip_process_tx_buf(data, &tx_frame);
			reg_base->SPIP_DATA = tx_frame;
		}
	}
}
#endif

static int transceive(const struct device *dev, const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
		      bool asynchronous, spi_callback_t cb, void *userdata)
{
	const struct spi_npcx_spip_cfg *const config = dev->config;
	struct spip_reg *const reg_base = config->reg_base;
	struct spi_npcx_spip_data *const data = dev->data;
	struct spi_context *ctx = &data->ctx;
	int rc;

	if (!tx_bufs && !rx_bufs) {
		return 0;
	}

#ifndef CONFIG_SPI_NPCX_SPIP_INTERRUPT
	if (asynchronous) {
		return -ENOTSUP;
	}
#endif

	/* Lock the SPI context */
	spi_context_lock(ctx, asynchronous, cb, userdata, spi_cfg);

	rc = spi_npcx_spip_configure(dev, spi_cfg);
	if (rc < 0) {
		spi_context_release(ctx, rc);
		return rc;
	}

	spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, data->bytes_per_frame);
	if (!spi_npcx_spip_transfer_ongoing(data)) {
		spi_context_release(ctx, 0);
		return 0;
	}

	/* Enable the SPIP module */
	reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_SPIEN);

	/* Flush any stale data left in the receive buffer */
	while (IS_BIT_SET(reg_base->SPIP_STAT, NPCX_SPIP_STAT_RBF)) {
		uint8_t unused __attribute__((unused));

		unused = reg_base->SPIP_DATA;
	}

	/* Assert the CS line */
	spi_context_cs_control(ctx, true);

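	/*
	 * In interrupt mode, enable the read/write interrupts and let the ISR drive the
	 * transfer; in polling mode, shift one frame at a time until both buffers are consumed.
	 */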
#ifdef CONFIG_SPI_NPCX_SPIP_INTERRUPT
	reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_EIR) | BIT(NPCX_SPIP_CTL1_EIW);
	rc = spi_context_wait_for_completion(&data->ctx);
#else
	do {
		rc = spi_npcx_spip_xfer_frame(dev);
		if (rc < 0) {
			break;
		}
	} while (spi_npcx_spip_transfer_ongoing(data));

	/*
	 * The CS might not de-assert if SPI_HOLD_ON_CS is configured.
	 * In this case, CS de-assertion relies on the caller explicitly calling spi_release().
	 */
	spi_context_cs_control(ctx, false);

#endif
	spi_context_release(ctx, rc);

	return rc;
}

static int spi_npcx_spip_transceive(const struct device *dev, const struct spi_config *spi_cfg,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_npcx_spip_transceive_async(const struct device *dev,
					  const struct spi_config *spi_cfg,
					  const struct spi_buf_set *tx_bufs,
					  const struct spi_buf_set *rx_bufs, spi_callback_t cb,
					  void *userdata)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif

static int spi_npcx_spip_release(const struct device *dev, const struct spi_config *spi_cfg)
{
	struct spi_npcx_spip_data *const data = dev->data;
	struct spi_context *ctx = &data->ctx;

	if (!spi_context_configured(ctx, spi_cfg)) {
		return -EINVAL;
	}

	spi_context_unlock_unconditionally(ctx);

	return 0;
}

static int spi_npcx_spip_init(const struct device *dev)
{
	int ret;
	struct spi_npcx_spip_data *const data = dev->data;
	const struct spi_npcx_spip_cfg *const config = dev->config;
	struct spip_reg *const reg_base = config->reg_base;
	const struct device *const clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE);

	if (!device_is_ready(clk_dev)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	ret = clock_control_on(clk_dev, (clock_control_subsys_t)&config->clk_cfg);
	if (ret < 0) {
		LOG_ERR("Failed to turn on the SPIP clock: %d", ret);
		return ret;
	}

	ret = clock_control_get_rate(clk_dev, (clock_control_subsys_t)&config->clk_cfg,
				     &data->src_clock_freq);
	if (ret < 0) {
		LOG_ERR("Failed to get the SPIP clock source rate: %d", ret);
		return ret;
	}

	ret = spi_context_cs_configure_all(&data->ctx);
	if (ret < 0) {
		return ret;
	}

	ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		return ret;
	}

	/* Make sure the context is unlocked */
	spi_context_unlock_unconditionally(&data->ctx);

#ifdef CONFIG_SPI_NPCX_SPIP_INTERRUPT
	config->irq_cfg_func(dev);
#endif

	/* Enable the SPIP module */
	reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_SPIEN);

	return 0;
}

static struct spi_driver_api spi_npcx_spip_api = {
	.transceive = spi_npcx_spip_transceive,
	.release = spi_npcx_spip_release,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_npcx_spip_transceive_async,
#endif
};

#ifdef CONFIG_SPI_NPCX_SPIP_INTERRUPT
#define NPCX_SPIP_IRQ_HANDLER(n)                                                                   \
	static void spi_npcx_spip_irq_cfg_func_##n(const struct device *dev)                       \
	{                                                                                          \
		IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), spi_npcx_spip_isr,          \
			    DEVICE_DT_INST_GET(n), 0);                                             \
		irq_enable(DT_INST_IRQN(n));                                                       \
	}

#define NPCX_SPIP_IRQ_HANDLER_FUNC(n) .irq_cfg_func = spi_npcx_spip_irq_cfg_func_##n,
#else
#define NPCX_SPIP_IRQ_HANDLER_FUNC(n)
#define NPCX_SPIP_IRQ_HANDLER(n)
#endif

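/* Instantiate the pinctrl, data, config, and device objects for each enabled SPIP node */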
#define NPCX_SPI_INIT(n)                                                                           \
	PINCTRL_DT_INST_DEFINE(n);                                                                 \
	NPCX_SPIP_IRQ_HANDLER(n)                                                                   \
                                                                                                   \
	static struct spi_npcx_spip_data spi_npcx_spip_data_##n = {                                \
		SPI_CONTEXT_INIT_LOCK(spi_npcx_spip_data_##n, ctx),                                \
		SPI_CONTEXT_INIT_SYNC(spi_npcx_spip_data_##n, ctx),                                \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)};                             \
                                                                                                   \
	static struct spi_npcx_spip_cfg spi_npcx_spip_cfg_##n = {                                  \
		.reg_base = (struct spip_reg *)DT_INST_REG_ADDR(n),                                \
		.clk_cfg = NPCX_DT_CLK_CFG_ITEM(n),                                                \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),                                         \
		NPCX_SPIP_IRQ_HANDLER_FUNC(n)};                                                    \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(n, spi_npcx_spip_init, NULL, &spi_npcx_spip_data_##n,                \
			      &spi_npcx_spip_cfg_##n, POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,       \
			      &spi_npcx_spip_api);

DT_INST_FOREACH_STATUS_OKAY(NPCX_SPI_INIT)