/*
 * Copyright (c) 2023 Intel Corporation.
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT intel_penwell_spi

#include <errno.h>
#include <stdint.h>
#include <stdbool.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>

#if DT_ANY_INST_ON_BUS_STATUS_OKAY(pcie)
BUILD_ASSERT(IS_ENABLED(CONFIG_PCIE), "DT needs CONFIG_PCIE");
#include <zephyr/drivers/pcie/pcie.h>
#endif

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_pw, CONFIG_SPI_LOG_LEVEL);

#include "spi_pw.h"

static uint32_t spi_pw_reg_read(const struct device *dev, uint32_t offset)
{
	return sys_read32(DEVICE_MMIO_GET(dev) + offset);
}

static void spi_pw_reg_write(const struct device *dev,
			     uint32_t offset,
			     uint32_t val)
{
	sys_write32(val, DEVICE_MMIO_GET(dev) + offset);
}

static void spi_pw_ssp_reset(const struct device *dev)
{
	/* Bring the controller from reset state into operational mode */
	spi_pw_reg_write(dev, PW_SPI_REG_RESETS, 0x00);
	spi_pw_reg_write(dev, PW_SPI_REG_RESETS, PW_SPI_INST_RESET);
}

#ifndef CONFIG_SPI_PW_INTERRUPT
static bool is_spi_transfer_ongoing(struct spi_pw_data *spi)
{
	return spi_context_tx_on(&spi->ctx) || spi_context_rx_on(&spi->ctx);
}
#endif

static void spi_pw_enable_cs_hw_ctrl(const struct device *dev)
{
	uint32_t cs_ctrl;

	cs_ctrl = spi_pw_reg_read(dev, PW_SPI_REG_CS_CTRL);
	cs_ctrl &= PW_SPI_CS_CTRL_HW_MODE;
	spi_pw_reg_write(dev, PW_SPI_REG_CS_CTRL, cs_ctrl);
}

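/*
 * Software-controlled chip select. Judging by the PW_SPI_CS_LOW and
 * PW_SPI_CS_HIGH constants used below, CS appears to be active low:
 * "enable" asserts the line by driving it low, otherwise it is driven
 * high (deasserted).
 */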
static void spi_pw_cs_sw_ctrl(const struct device *dev, bool enable)
{
	uint32_t cs_ctrl;

	cs_ctrl = spi_pw_reg_read(dev, PW_SPI_REG_CS_CTRL);
	cs_ctrl &= ~(PW_SPI_CS_CTRL_CS_MASK);
	/* Enable chip select software control method */
	cs_ctrl |= PW_SPI_CS_CTRL_SW_MODE;

	if (enable) {
		cs_ctrl &= PW_SPI_CS_LOW;
	} else {
		cs_ctrl |= PW_SPI_CS_HIGH;
	}

	spi_pw_reg_write(dev, PW_SPI_REG_CS_CTRL, cs_ctrl);
}

#ifdef CONFIG_SPI_PW_INTERRUPT
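/*
 * Unmask SSP interrupt sources in CTRLR1. When rx_mask is false the
 * Rx-related bits are cleared again, so Rx interrupts stay disabled
 * for transfers that have no Rx buffer.
 */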
static void spi_pw_intr_enable(const struct device *dev, bool rx_mask)
{
	uint32_t ctrlr1;

	ctrlr1 = spi_pw_reg_read(dev, PW_SPI_REG_CTRLR1);
	if (rx_mask) {
		ctrlr1 |= PW_SPI_INTR_BITS;
	} else {
		ctrlr1 |= PW_SPI_INTR_BITS;
		ctrlr1 &= ~(PW_SPI_INTR_MASK_RX);
	}
	spi_pw_reg_write(dev, PW_SPI_REG_CTRLR1, ctrlr1);
}

static void spi_pw_intr_disable(const struct device *dev)
{
	uint32_t ctrlr1;

	ctrlr1 = spi_pw_reg_read(dev, PW_SPI_REG_CTRLR1);
	ctrlr1 &= ~(PW_SPI_INTR_BITS);
	spi_pw_reg_write(dev, PW_SPI_REG_CTRLR1, ctrlr1);
}
#endif

static void spi_pw_ssp_enable(const struct device *dev)
{
	uint32_t ctrlr0;

	ctrlr0 = spi_pw_reg_read(dev, PW_SPI_REG_CTRLR0);
	ctrlr0 |= PW_SPI_CTRLR0_SSE_BIT;
	spi_pw_reg_write(dev, PW_SPI_REG_CTRLR0, ctrlr0);
}

static void spi_pw_ssp_disable(const struct device *dev)
{
	uint32_t ctrlr0;

	ctrlr0 = spi_pw_reg_read(dev, PW_SPI_REG_CTRLR0);
	ctrlr0 &= ~(PW_SPI_CTRLR0_SSE_BIT);
	spi_pw_reg_write(dev, PW_SPI_REG_CTRLR0, ctrlr0);
}

static bool is_pw_ssp_busy(const struct device *dev)
{
	uint32_t status;

	status = spi_pw_reg_read(dev, PW_SPI_REG_SSSR);
	return (status & PW_SPI_SSSR_BSY_BIT) ? true : false;
}

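/*
 * Frame size in bytes, derived from the word size configured in
 * config->operation; unsupported sizes fall back to one byte.
 */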
static uint8_t spi_pw_get_frame_size(const struct spi_config *config)
{
	uint8_t dfs = SPI_WORD_SIZE_GET(config->operation);

	dfs /= PW_SPI_WIDTH_8BITS;

	if ((dfs == 0) || (dfs > PW_SPI_FRAME_SIZE_4_BYTES)) {
		LOG_WRN("Unsupported dfs, 1-byte size will be used");
		dfs = PW_SPI_FRAME_SIZE_1_BYTE;
	}

	return dfs;
}

void spi_pw_cs_ctrl_enable(const struct device *dev, bool enable)
{
	struct spi_pw_data *spi = dev->data;

	if (enable) {
		if (spi->cs_mode == CS_SW_MODE) {
			spi_pw_cs_sw_ctrl(dev, true);
		} else if (spi->cs_mode == CS_GPIO_MODE) {
			spi_context_cs_control(&spi->ctx, true);
		}
	} else {
		if (spi->cs_mode == CS_SW_MODE) {
			spi_pw_cs_sw_ctrl(dev, false);
		} else if (spi->cs_mode == CS_GPIO_MODE) {
			spi_context_cs_control(&spi->ctx, false);
		}
	}
}

static void spi_pw_cs_ctrl_init(const struct device *dev)
{
	uint32_t cs_ctrl;
	struct spi_pw_data *spi = dev->data;

	/* Enable chip select output CS0/CS1 */
	cs_ctrl = spi_pw_reg_read(dev, PW_SPI_REG_CS_CTRL);

	if (spi->cs_output == PW_SPI_CS1_OUTPUT_SELECT) {
		cs_ctrl &= ~(PW_SPI_CS_CTRL_CS_MASK << PW_SPI_CS_EN_SHIFT);
		/* Set chip select CS1 */
		cs_ctrl |= PW_SPI_CS1_SELECT;
	} else {
		/* Set chip select CS0 */
		cs_ctrl &= ~(PW_SPI_CS_CTRL_CS_MASK << PW_SPI_CS_EN_SHIFT);
	}

	spi_pw_reg_write(dev, PW_SPI_REG_CS_CTRL, cs_ctrl);

	if (spi->cs_mode == CS_HW_MODE) {
		spi_pw_enable_cs_hw_ctrl(dev);
	} else if (spi->cs_mode == CS_SW_MODE) {
		spi_pw_cs_sw_ctrl(dev, false);
	} else if (spi->cs_mode == CS_GPIO_MODE) {
		spi_pw_cs_sw_ctrl(dev, false);
	}
}

static void spi_pw_tx_thld_set(const struct device *dev)
{
	uint32_t reg_data;

	/* Tx threshold */
	reg_data = spi_pw_reg_read(dev, PW_SPI_REG_SITF);
	/* mask high water mark bits in tx fifo reg */
	reg_data &= ~(PW_SPI_WM_MASK);
	/* mask low water mark bits in tx fifo reg */
	reg_data &= ~(PW_SPI_WM_MASK << PW_SPI_SITF_LWMTF_SHIFT);
	reg_data |= (PW_SPI_SITF_HIGH_WM_DFLT | PW_SPI_SITF_LOW_WM_DFLT);
	spi_pw_reg_write(dev, PW_SPI_REG_SITF, reg_data);
}

static void spi_pw_rx_thld_set(const struct device *dev,
			       struct spi_pw_data *spi)
{
	uint32_t reg_data;

	/* Rx threshold */
	reg_data = spi_pw_reg_read(dev, PW_SPI_REG_SIRF);
	reg_data &= (uint32_t) ~(PW_SPI_WM_MASK);
	reg_data |= PW_SPI_SIRF_WM_DFLT;
	if (spi->ctx.rx_len && spi->ctx.rx_len < spi->fifo_depth) {
		reg_data = spi->ctx.rx_len - 1;
	}
	spi_pw_reg_write(dev, PW_SPI_REG_SIRF, reg_data);
}

static int spi_pw_set_data_size(const struct device *dev,
				const struct spi_config *config)
{
	uint32_t ctrlr0;

	ctrlr0 = spi_pw_reg_read(dev, PW_SPI_REG_CTRLR0);

	/* Full duplex mode */
	ctrlr0 &= ~(PW_SPI_CTRLR0_MOD_BIT);

	ctrlr0 &= PW_SPI_CTRLR0_DATA_MASK;
	ctrlr0 &= PW_SPI_CTRLR0_EDSS_MASK;

	/* Set the word size */
	if (SPI_WORD_SIZE_GET(config->operation) == 4) {
		ctrlr0 |= PW_SPI_DATA_SIZE_4_BIT;
	} else if (SPI_WORD_SIZE_GET(config->operation) == 8) {
		ctrlr0 |= PW_SPI_DATA_SIZE_8_BIT;
	} else if (SPI_WORD_SIZE_GET(config->operation) == 16) {
		ctrlr0 |= PW_SPI_DATA_SIZE_16_BIT;
	} else if (SPI_WORD_SIZE_GET(config->operation) == 32) {
		ctrlr0 |= PW_SPI_DATA_SIZE_32_BIT;
	} else {
		LOG_ERR("Invalid word size");
		return -ENOTSUP;
	}

	spi_pw_reg_write(dev, PW_SPI_REG_CTRLR0, ctrlr0);

	return 0;
}

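/*
 * Map the Zephyr SPI mode (CPOL/CPHA from config->operation) onto the
 * controller's SPO and SPH bits: SPI_PW_MODE0..3 correspond to the
 * four CPOL/CPHA combinations.
 */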
static void spi_pw_config_phase_polarity(const struct device *dev,
					 const struct spi_config *config)
{
	uint8_t mode;
	uint32_t ctrlr1;

	ctrlr1 = spi_pw_reg_read(dev, PW_SPI_REG_CTRLR1);

	mode = (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) |
	       (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA);

	LOG_DBG("mode: 0x%x", (mode >> 1));
	switch (mode >> 1) {
	case SPI_PW_MODE0:
		ctrlr1 &= ~(PW_SPI_CTRL1_SPO_SPH_MASK);
		ctrlr1 &= ~(PW_SPI_CTRL1_SPO_BIT);
		ctrlr1 &= ~(PW_SPI_CTRL1_SPH_BIT);
		break;
	case SPI_PW_MODE1:
		ctrlr1 &= ~(PW_SPI_CTRL1_SPO_SPH_MASK);
		ctrlr1 |= PW_SPI_CTRL1_SPO_BIT;
		ctrlr1 &= ~(PW_SPI_CTRL1_SPH_BIT);
		break;
	case SPI_PW_MODE2:
		ctrlr1 &= ~(PW_SPI_CTRL1_SPO_SPH_MASK);
		ctrlr1 &= ~(PW_SPI_CTRL1_SPO_BIT);
		ctrlr1 |= PW_SPI_CTRL1_SPH_BIT;
		break;
	case SPI_PW_MODE3:
		ctrlr1 |= PW_SPI_CTRL1_SPO_BIT;
		ctrlr1 |= PW_SPI_CTRL1_SPH_BIT;
		break;
	}

	/* Set Polarity & Phase */
	spi_pw_reg_write(dev, PW_SPI_REG_CTRLR1, ctrlr1);
}

static void spi_pw_enable_clk(const struct device *dev)
{
	uint32_t clks;

	/* Update M:N value & enable clock */
	clks = spi_pw_reg_read(dev, PW_SPI_REG_CLKS);
	clks &= ~(PW_SPI_CLKS_MVAL_MASK);
	clks &= ~(PW_SPI_CLKS_NVAL_MASK);
	clks |= (PW_SPI_CLKS_MVAL | PW_SPI_CLKS_NVAL |
		 PW_SPI_CLKS_EN_BIT | PW_SPI_CLKS_UPDATE_BIT);
	spi_pw_reg_write(dev, PW_SPI_REG_CLKS, clks);
}

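/*
 * Program the serial clock rate (SCR) divider in CTRLR0. From the
 * arithmetic below, the resulting bit rate is clock_freq / (scr + 1),
 * clamped to PW_SPI_BR_MAX_FRQ; a zero requested frequency falls back
 * to the default 2 MHz divider.
 */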
static void spi_pw_config_clk(const struct device *dev,
			      const struct spi_pw_config *info,
			      const struct spi_config *config)
{
	uint32_t ctrlr0, scr;

	/* Update scr control bits */
	if (!config->frequency) {
		scr = PW_SPI_BR_2MHZ;
	} else if (config->frequency > PW_SPI_BR_MAX_FRQ) {
		scr = (info->clock_freq / PW_SPI_BR_MAX_FRQ) - 1;
	} else {
		scr = (info->clock_freq / config->frequency) - 1;
	}
	ctrlr0 = spi_pw_reg_read(dev, PW_SPI_REG_CTRLR0);

	ctrlr0 &= ~(PW_SPI_SCR_MASK);
	ctrlr0 |= (scr << PW_SPI_SCR_SHIFT);
	spi_pw_reg_write(dev, PW_SPI_REG_CTRLR0, ctrlr0);
}

static void spi_pw_completed(const struct device *dev, int err)
{
	struct spi_pw_data *spi = dev->data;

	if (!err && (spi_context_tx_on(&spi->ctx) ||
		     spi_context_rx_on(&spi->ctx))) {
		return;
	}

	/* Need to give time for FIFOs to drain before issuing more commands */
	while (is_pw_ssp_busy(dev)) {
	}

#ifdef CONFIG_SPI_PW_INTERRUPT
	/* Disable interrupts */
	spi_pw_intr_disable(dev);
#endif

	/* Disabling the controller operation also clears all status bits
	 * in the status register
	 */
	spi_pw_ssp_disable(dev);

	spi_pw_cs_ctrl_enable(dev, false);

	LOG_DBG("SPI transaction completed %s error\n",
		err ? "with" : "without");

	spi_context_complete(&spi->ctx, dev, err);
}

static void spi_pw_clear_intr(const struct device *dev)
{
	uint32_t sssr;

	sssr = spi_pw_reg_read(dev, PW_SPI_REG_SSSR);
	sssr &= ~(PW_SPI_INTR_ERRORS_MASK);
	spi_pw_reg_write(dev, PW_SPI_REG_SSSR, sssr);
}

static int spi_pw_get_tx_fifo_level(const struct device *dev)
{
	uint32_t tx_fifo_level;

	tx_fifo_level = spi_pw_reg_read(dev, PW_SPI_REG_SITF);

	tx_fifo_level = ((tx_fifo_level & PW_SPI_SITF_SITFL_MASK) >>
			 PW_SPI_SITF_SITFL_SHIFT);

	return tx_fifo_level;
}

static int spi_pw_get_rx_fifo_level(const struct device *dev)
{
	uint32_t rx_fifo_level;

	rx_fifo_level = spi_pw_reg_read(dev, PW_SPI_REG_SIRF);
	rx_fifo_level = ((rx_fifo_level & PW_SPI_SIRF_SIRFL_MASK) >>
			 PW_SPI_SIRF_SIRFL_SHIFT);

	return rx_fifo_level;
}

static void spi_pw_reset_tx_fifo_level(const struct device *dev)
{
	uint32_t tx_fifo_level;

	tx_fifo_level = spi_pw_reg_read(dev, PW_SPI_REG_SITF);
	tx_fifo_level &= ~(PW_SPI_SITF_SITFL_MASK);
	spi_pw_reg_write(dev, PW_SPI_REG_SITF, tx_fifo_level);
}

static void spi_pw_update_rx_fifo_level(uint32_t len,
					const struct device *dev)
{
	uint32_t rx_fifo_level;

	rx_fifo_level = spi_pw_reg_read(dev, PW_SPI_REG_SIRF);
	rx_fifo_level &= ~(PW_SPI_SIRF_SIRFL_MASK);
	rx_fifo_level |= (len << PW_SPI_SIRF_SIRFL_SHIFT);
	spi_pw_reg_write(dev, PW_SPI_REG_SIRF, rx_fifo_level);
}

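/*
 * Fill the Tx FIFO. The available space is bounded by the FIFO depth
 * minus what is already queued; when a receive is in progress the Rx
 * FIFO level is subtracted as well, so Tx never outruns the Rx side.
 * fifo_diff counts frames written to Tx but not yet drained from Rx.
 */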
static void spi_pw_tx_data(const struct device *dev)
{
	struct spi_pw_data *spi = dev->data;
	uint32_t data = 0U;
	int32_t fifo_len;

	if (spi_context_rx_on(&spi->ctx)) {
		fifo_len = spi->fifo_depth -
			   spi_pw_get_tx_fifo_level(dev) -
			   spi_pw_get_rx_fifo_level(dev);
		if (fifo_len < 0) {
			fifo_len = 0;
		}
	} else {
		fifo_len = spi->fifo_depth - spi_pw_get_tx_fifo_level(dev);
	}

	while (fifo_len > 0) {
		if (spi_context_tx_buf_on(&spi->ctx)) {
			switch (spi->dfs) {
			case 1:
				data = UNALIGNED_GET((uint8_t *)
						     (spi->ctx.tx_buf));
				break;
			case 2:
				data = UNALIGNED_GET((uint16_t *)
						     (spi->ctx.tx_buf));
				break;
			case 4:
				data = UNALIGNED_GET((uint32_t *)
						     (spi->ctx.tx_buf));
				break;
			}
		} else if (spi_context_rx_on(&spi->ctx)) {
			if ((int)(spi->ctx.rx_len - spi->fifo_diff) <= 0) {
				break;
			}

			data = 0U;
		} else if (spi_context_tx_on(&spi->ctx)) {
			data = 0U;
		} else {
			break;
		}

		spi_pw_reg_write(dev, PW_SPI_REG_SSDR, data);

		spi_context_update_tx(&spi->ctx, spi->dfs, 1);
		spi->fifo_diff++;
		fifo_len--;
	}

	if (!spi_context_tx_on(&spi->ctx)) {
		spi_pw_reset_tx_fifo_level(dev);
	}
}

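/*
 * Drain the Rx FIFO into the context buffer, then lower the Rx
 * threshold for the tail of the transfer so the final, smaller burst
 * is still signalled.
 */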
static void spi_pw_rx_data(const struct device *dev)
{
	struct spi_pw_data *spi = dev->data;

	while (spi_pw_get_rx_fifo_level(dev)) {
		uint32_t data = spi_pw_reg_read(dev, PW_SPI_REG_SSDR);

		if (spi_context_rx_buf_on(&spi->ctx)) {
			switch (spi->dfs) {
			case 1:
				UNALIGNED_PUT(data,
					      (uint8_t *)spi->ctx.rx_buf);
				break;
			case 2:
				UNALIGNED_PUT(data,
					      (uint16_t *)spi->ctx.rx_buf);
				break;
			case 4:
				UNALIGNED_PUT(data,
					      (uint32_t *)spi->ctx.rx_buf);
				break;
			}
		}

		spi_context_update_rx(&spi->ctx, spi->dfs, 1);
		spi->fifo_diff--;
	}

	if (!spi->ctx.rx_len && spi->ctx.tx_len < spi->fifo_depth) {
		spi_pw_update_rx_fifo_level(spi->ctx.tx_len - 1, dev);
	} else if (spi_pw_get_rx_fifo_level(dev) >= spi->ctx.rx_len) {
		spi_pw_update_rx_fifo_level(spi->ctx.rx_len - 1, dev);
	}
}

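/*
 * Service one transfer step: report FIFO overrun/underrun/timeout
 * errors from the status register, then move data in both directions.
 * Called from the ISR in interrupt mode and from the polling loop in
 * transceive() otherwise.
 */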
static int spi_pw_transfer(const struct device *dev)
{
	uint32_t intr_status;
	int err;

	intr_status = spi_pw_reg_read(dev, PW_SPI_REG_SSSR);

	if (intr_status & PW_SPI_SSSR_ROR_BIT) {
		LOG_ERR("Receive FIFO overrun");
		err = -EIO;
		goto out;
	}

	if (intr_status & PW_SPI_SSSR_TUR_BIT) {
		LOG_ERR("Transmit FIFO underrun");
		err = -EIO;
		goto out;
	}

	if (intr_status & PW_SPI_SSSR_TINT_BIT) {
		LOG_ERR("Receiver timeout interrupt");
		err = -EIO;
		goto out;
	}

	err = 0;

	if (intr_status & PW_SPI_SSSR_RNE_BIT) {
		spi_pw_rx_data(dev);
	}

	if (intr_status & PW_SPI_SSSR_TNF_BIT) {
		spi_pw_tx_data(dev);
	}

out:
	if (err) {
		spi_pw_clear_intr(dev);
	}

	return err;
}

static int spi_pw_configure(const struct device *dev,
			    const struct spi_pw_config *info,
			    struct spi_pw_data *spi,
			    const struct spi_config *config)
{
	int err;

	/* At this point, it's mandatory to set this on the context! */
	spi->ctx.config = config;

	if (!spi_cs_is_gpio(spi->ctx.config)) {
		if (spi->cs_mode == CS_GPIO_MODE) {
			LOG_DBG("cs gpio is NULL, switch to hw mode");
			spi->cs_mode = CS_HW_MODE;
			spi_pw_enable_cs_hw_ctrl(dev);
		}
	}

	if (config->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	/* Verify if requested op mode is relevant to this controller */
	if (config->operation & SPI_OP_MODE_SLAVE) {
		LOG_ERR("Slave mode not supported");
		return -ENOTSUP;
	}

	if ((config->operation & SPI_TRANSFER_LSB) ||
	    (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) &&
	     (config->operation & (SPI_LINES_DUAL |
				   SPI_LINES_QUAD |
				   SPI_LINES_OCTAL)))) {
		LOG_ERR("Unsupported configuration: LSB-first or multi-line mode");
		return -EINVAL;
	}

	if (config->operation & SPI_FRAME_FORMAT_TI) {
		LOG_ERR("TI frame format not supported");
		return -ENOTSUP;
	}

	if (config->operation & SPI_HOLD_ON_CS) {
		LOG_ERR("Chip select hold not supported");
		return -ENOTSUP;
	}

	/* Set mode & data size */
	err = spi_pw_set_data_size(dev, config);
	if (err) {
		LOG_ERR("Invalid data size");
		return -ENOTSUP;
	}

	/* Set polarity & phase */
	spi_pw_config_phase_polarity(dev, config);

	/* Enable clock */
	spi_pw_enable_clk(dev);

	/* Configure clock divider */
	spi_pw_config_clk(dev, info, config);

	return 0;
}

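/*
 * Common transfer path shared by the sync and async entry points:
 * configure the controller, set up the context buffers and FIFO
 * thresholds, assert chip select, then run the transfer either from
 * interrupts or by polling spi_pw_transfer() until completion.
 */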
static int transceive(const struct device *dev,
		      const struct spi_config *config,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	const struct spi_pw_config *info = dev->config;
	struct spi_pw_data *spi = dev->data;
	int err;

	if (!tx_bufs && !rx_bufs) {
		LOG_ERR("Tx & Rx buffers are NULL");
		return 0;
	}

	if (asynchronous) {
		LOG_ERR("Async not supported");
		return -ENOTSUP;
	}

	spi_context_lock(&spi->ctx, asynchronous, cb, userdata, config);

	/* Configure */
	err = spi_pw_configure(dev, info, spi, config);
	if (err) {
		LOG_ERR("spi pw configuration failed");
		goto out;
	}

	/* Frame size in number of data bytes */
	spi->dfs = spi_pw_get_frame_size(config);
	spi_context_buffers_setup(&spi->ctx, tx_bufs, rx_bufs,
				  spi->dfs);

	spi->fifo_diff = 0U;

	/* Tx threshold */
	spi_pw_tx_thld_set(dev);

	/* Rx threshold */
	spi_pw_rx_thld_set(dev, spi);

	spi_pw_cs_ctrl_enable(dev, true);

	/* Enable ssp operation */
	spi_pw_ssp_enable(dev);

#ifdef CONFIG_SPI_PW_INTERRUPT
	LOG_DBG("Interrupt Mode");

	/* Enable interrupts */
	if (rx_bufs) {
		spi_pw_intr_enable(dev, true);
	} else {
		spi_pw_intr_enable(dev, false);
	}

	err = spi_context_wait_for_completion(&spi->ctx);
#else
	LOG_DBG("Polling Mode");

	do {
		err = spi_pw_transfer(dev);
	} while ((!err) && is_spi_transfer_ongoing(spi));

	spi_pw_completed(dev, err);
#endif

out:
	spi_context_release(&spi->ctx, err);
	return err;
}

static int spi_pw_transceive(const struct device *dev,
			     const struct spi_config *config,
			     const struct spi_buf_set *tx_bufs,
			     const struct spi_buf_set *rx_bufs)
{
	LOG_DBG("%p, %p, %p\n", dev, tx_bufs, rx_bufs);
	return transceive(dev, config, tx_bufs, rx_bufs,
			  false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_pw_transceive_async(const struct device *dev,
				   const struct spi_config *config,
				   const struct spi_buf_set *tx_bufs,
				   const struct spi_buf_set *rx_bufs,
				   spi_callback_t cb,
				   void *userdata)
{
	LOG_DBG("%p, %p, %p, %p, %p\n", dev, tx_bufs, rx_bufs,
					cb, userdata);

	return transceive(dev, config, tx_bufs, rx_bufs, true,
			  cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_pw_release(const struct device *dev,
			  const struct spi_config *config)
{
	struct spi_pw_data *spi = dev->data;

	if (!spi_context_configured(&spi->ctx, config)) {
		return -EINVAL;
	}

	spi_context_unlock_unconditionally(&spi->ctx);

	return 0;
}

#ifdef CONFIG_SPI_PW_INTERRUPT
static void spi_pw_isr(const void *arg)
{
	const struct device *dev = (const struct device *)arg;
	int err;

	err = spi_pw_transfer(dev);
	spi_pw_completed(dev, err);
}
#endif

static DEVICE_API(spi, pw_spi_api) = {
	.transceive = spi_pw_transceive,
	.release = spi_pw_release,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_pw_transceive_async,
#endif  /* CONFIG_SPI_ASYNC */
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
};

static int spi_pw_init(const struct device *dev)
{
	const struct spi_pw_config *info = dev->config;
	struct spi_pw_data *spi = dev->data;
	int err;

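	/*
	 * For instances on a PCIe bus, probe BAR0 and map it into
	 * memory; otherwise map the MMIO range straight from the
	 * devicetree registers.
	 */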
#if DT_ANY_INST_ON_BUS_STATUS_OKAY(pcie)
	if (info->pcie) {
		struct pcie_bar mbar;

		if (info->pcie->bdf == PCIE_BDF_NONE) {
			LOG_ERR("Cannot probe PCI device");
			return -ENODEV;
		}

		if (!pcie_probe_mbar(info->pcie->bdf, 0, &mbar)) {
			LOG_ERR("MBAR not found");
			return -EINVAL;
		}

		pcie_set_cmd(info->pcie->bdf, PCIE_CONF_CMDSTAT_MEM,
			     true);

		device_map(DEVICE_MMIO_RAM_PTR(dev), mbar.phys_addr,
			   mbar.size, K_MEM_CACHE_NONE);

		pcie_set_cmd(info->pcie->bdf,
			     PCIE_CONF_CMDSTAT_MASTER,
			     true);
	} else {
		DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);
	}
#else
	DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);
#endif

	/* Bring ssp out of reset */
	spi_pw_ssp_reset(dev);

	/* Disable ssp operation */
	spi_pw_ssp_disable(dev);

	/* Chip select control */
	spi_pw_cs_ctrl_init(dev);

#if defined(CONFIG_SPI_PW_INTERRUPT)
	/* Mask interrupts */
	spi_pw_intr_disable(dev);

	/* Init and connect IRQ */
	info->irq_config(dev);
#endif

	if (spi->cs_mode == CS_GPIO_MODE) {
		err = spi_context_cs_configure_all(&spi->ctx);
		if (err < 0) {
			LOG_ERR("Failed to configure CS pins: %d", err);
			return err;
		}
	}

	spi_context_unlock_unconditionally(&spi->ctx);

	LOG_DBG("SPI pw init success");

	return 0;
}

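/*
 * PCIe glue: these helpers expand to the PCIe instance declarations
 * only when DT_INST_ON_BUS(n, pcie) evaluates to 1, and to nothing
 * otherwise, so the same SPI_PW_DEV_INIT() works for both bus types.
 */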
#define INIT_PCIE0(n)
#define INIT_PCIE1(n) DEVICE_PCIE_INST_INIT(n, pcie),
#define INIT_PCIE(n) _CONCAT(INIT_PCIE, DT_INST_ON_BUS(n, pcie))(n)

#define DEFINE_PCIE0(n)
#define DEFINE_PCIE1(n) DEVICE_PCIE_INST_DECLARE(n)
#define SPI_PCIE_DEFINE(n) _CONCAT(DEFINE_PCIE, DT_INST_ON_BUS(n, pcie))(n)

#ifdef CONFIG_SPI_PW_INTERRUPT

#define SPI_INTEL_IRQ_FLAGS_SENSE0(n) 0
#define SPI_INTEL_IRQ_FLAGS_SENSE1(n) DT_INST_IRQ(n, sense)
#define SPI_INTEL_IRQ_FLAGS(n) \
	_CONCAT(SPI_INTEL_IRQ_FLAGS_SENSE, DT_INST_IRQ_HAS_CELL(n, sense))(n)

#define SPI_INTEL_IRQ_INIT(n)						     \
	BUILD_ASSERT(IS_ENABLED(CONFIG_DYNAMIC_INTERRUPTS),		     \
		     "SPI PCIe requires dynamic interrupts");		     \
	static void spi_##n##_irq_init(const struct device *dev)	     \
	{								     \
		const struct spi_pw_config *info = dev->config;		     \
		unsigned int irq;					     \
		if (DT_INST_IRQN(n) == PCIE_IRQ_DETECT) {		     \
			irq = pcie_alloc_irq(info->pcie->bdf);		     \
			if (irq == PCIE_CONF_INTR_IRQ_NONE) {		     \
				return;					     \
			}						     \
		} else {						     \
			irq = DT_INST_IRQN(n);				     \
			pcie_conf_write(info->pcie->bdf,		     \
					PCIE_CONF_INTR, irq);		     \
		}							     \
		pcie_connect_dynamic_irq(info->pcie->bdf, irq,		     \
					 DT_INST_IRQ(n, priority),	     \
					 (void (*)(const void *))spi_pw_isr, \
					 DEVICE_DT_INST_GET(n),		     \
					 SPI_INTEL_IRQ_FLAGS(n));	     \
		pcie_irq_enable(info->pcie->bdf, irq);			     \
		LOG_DBG("spi pw: configured irq %d", irq);		     \
	}

#define SPI_PW_DEV_INIT(n)					     \
	static struct spi_pw_data spi_##n##_data = {		     \
		SPI_CONTEXT_INIT_LOCK(spi_##n##_data, ctx),	     \
		SPI_CONTEXT_INIT_SYNC(spi_##n##_data, ctx),	     \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \
		.cs_mode = DT_INST_PROP(n, pw_cs_mode),		     \
		.cs_output = DT_INST_PROP(n, pw_cs_output),	     \
		.fifo_depth = DT_INST_PROP(n, pw_fifo_depth),	     \
	};							     \
	SPI_PCIE_DEFINE(n);					     \
	SPI_INTEL_IRQ_INIT(n)					     \
	static const struct spi_pw_config spi_##n##_config = {	     \
		.irq_config = spi_##n##_irq_init,		     \
		.clock_freq = DT_INST_PROP(n, clock_frequency),	     \
		INIT_PCIE(n)					     \
	};							     \
	SPI_DEVICE_DT_INST_DEFINE(n, spi_pw_init, NULL,		     \
			      &spi_##n##_data, &spi_##n##_config,    \
			      POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \
			      &pw_spi_api);
#else

#define SPI_PW_DEV_INIT(n)					     \
	static struct spi_pw_data spi_##n##_data = {		     \
		SPI_CONTEXT_INIT_LOCK(spi_##n##_data, ctx),	     \
		SPI_CONTEXT_INIT_SYNC(spi_##n##_data, ctx),	     \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \
		.cs_mode = DT_INST_PROP(n, pw_cs_mode),		     \
		.cs_output = DT_INST_PROP(n, pw_cs_output),	     \
		.fifo_depth = DT_INST_PROP(n, pw_fifo_depth),	     \
	};							     \
	SPI_PCIE_DEFINE(n);					     \
	static const struct spi_pw_config spi_##n##_config = {	     \
		.clock_freq = DT_INST_PROP(n, clock_frequency),	     \
		INIT_PCIE(n)					     \
	};							     \
	SPI_DEVICE_DT_INST_DEFINE(n, spi_pw_init, NULL,		     \
			      &spi_##n##_data, &spi_##n##_config,    \
			      POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \
			      &pw_spi_api);

#endif

DT_INST_FOREACH_STATUS_OKAY(SPI_PW_DEV_INIT)