/*
 * Copyright (c) 2018 SiFive Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT sifive_spi0

#define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_sifive);

#include "spi_sifive.h"

#include <soc.h>
#include <stdbool.h>

/* Helper Functions */

static ALWAYS_INLINE
void sys_set_mask(mem_addr_t addr, uint32_t mask, uint32_t value)
{
	uint32_t temp = sys_read32(addr);

	temp &= ~(mask);
	temp |= value;

	sys_write32(temp, addr);
}

static int spi_config(const struct device *dev, uint32_t frequency,
		      uint16_t operation)
{
	uint32_t div;
	uint32_t fmt_len;

	if (operation & SPI_HALF_DUPLEX) {
		return -ENOTSUP;
	}

	if (SPI_OP_MODE_GET(operation) != SPI_OP_MODE_MASTER) {
		return -ENOTSUP;
	}

	if (operation & SPI_MODE_LOOP) {
		return -ENOTSUP;
	}

	/* Set the SPI frequency */
	div = (SPI_CFG(dev)->f_sys / (frequency * 2U)) - 1;
	sys_write32((SF_SCKDIV_DIV_MASK & div), SPI_REG(dev, REG_SCKDIV));
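
	/*
	 * Worked example, derived by inverting the line above (the
	 * concrete frequencies are assumptions for illustration, not
	 * taken from any particular board): the hardware divides as
	 * f_sck = f_sys / (2 * (div + 1)). With f_sys = 16 MHz and a
	 * requested 1 MHz, div = 16000000 / 2000000 - 1 = 7, and the
	 * resulting SCK is 16 MHz / 16 = 1 MHz exactly. When the ratio
	 * is inexact, the integer division truncates div downward, so
	 * the achieved SCK lands at or above the requested frequency.
	 */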

	/* Set the polarity */
	if (operation & SPI_MODE_CPOL) {
		/* If CPOL is set, then SCK idles at logical 1 */
		sys_set_bit(SPI_REG(dev, REG_SCKMODE), SF_SCKMODE_POL);
	} else {
		/* SCK idles at logical 0 */
		sys_clear_bit(SPI_REG(dev, REG_SCKMODE), SF_SCKMODE_POL);
	}

	/* Set the phase */
	if (operation & SPI_MODE_CPHA) {
		/*
		 * If CPHA is set, then data is sampled
		 * on the trailing SCK edge
		 */
		sys_set_bit(SPI_REG(dev, REG_SCKMODE), SF_SCKMODE_PHA);
	} else {
		/* Data is sampled on the leading SCK edge */
		sys_clear_bit(SPI_REG(dev, REG_SCKMODE), SF_SCKMODE_PHA);
	}
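
	/*
	 * For reference, the two bits written above map onto the
	 * conventional SPI mode numbers as follows:
	 *   mode 0: CPOL=0, CPHA=0 (idle low,  sample on leading edge)
	 *   mode 1: CPOL=0, CPHA=1 (idle low,  sample on trailing edge)
	 *   mode 2: CPOL=1, CPHA=0 (idle high, sample on leading edge)
	 *   mode 3: CPOL=1, CPHA=1 (idle high, sample on trailing edge)
	 */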

	/* Get the frame length */
	fmt_len = SPI_WORD_SIZE_GET(operation);
	/* Reject word sizes too large for the FMT.len field */
	if (fmt_len > (SF_FMT_LEN_MASK >> SF_FMT_LEN)) {
		return -ENOTSUP;
	}

	/* Set the frame length */
	fmt_len = fmt_len << SF_FMT_LEN;
	fmt_len &= SF_FMT_LEN_MASK;
	sys_set_mask(SPI_REG(dev, REG_FMT), SF_FMT_LEN_MASK, fmt_len);
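
	/*
	 * Illustrative packing math (the field position here is an
	 * assumption for the sake of the example; the authoritative
	 * values live in spi_sifive.h): if the len field sits at bits
	 * [19:16], then SF_FMT_LEN is 16 and SF_FMT_LEN_MASK is 0xF0000,
	 * so an 8-bit word size packs as 8 << 16 = 0x80000.
	 * sys_set_mask() merges that into REG_FMT while leaving the
	 * protocol and endianness bits untouched.
	 */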

	if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) &&
	    (operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
		return -ENOTSUP;
	}
	/* Set single line operation */
	sys_set_mask(SPI_REG(dev, REG_FMT),
		SF_FMT_PROTO_MASK,
		SF_FMT_PROTO_SINGLE);

	/* Set the endianness */
	if (operation & SPI_TRANSFER_LSB) {
		sys_set_bit(SPI_REG(dev, REG_FMT), SF_FMT_ENDIAN);
	} else {
		sys_clear_bit(SPI_REG(dev, REG_FMT), SF_FMT_ENDIAN);
	}

	return 0;
}

static ALWAYS_INLINE bool spi_sifive_send_available(const struct device *dev)
{
	return !(sys_read32(SPI_REG(dev, REG_TXDATA)) & SF_TXDATA_FULL);
}

static ALWAYS_INLINE
void spi_sifive_send(const struct device *dev, uint8_t frame)
{
	sys_write32((uint32_t) frame, SPI_REG(dev, REG_TXDATA));
}

static ALWAYS_INLINE
bool spi_sifive_recv(const struct device *dev, uint8_t *val)
{
	uint32_t reg = sys_read32(SPI_REG(dev, REG_RXDATA));

	if (reg & SF_RXDATA_EMPTY) {
		return false;
	}
	*val = (uint8_t) reg;
	return true;
}
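
/*
 * Note on the access pattern above: on this IP a single read of RXDATA
 * both reports the FIFO-empty flag and, when a frame is present,
 * dequeues it. That is why spi_sifive_recv() decides success from one
 * read instead of polling a separate status register first; a second
 * read could silently pop a frame the caller never sees.
 */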

static void spi_sifive_xfer(const struct device *dev, const bool hw_cs_control)
{
	struct spi_context *ctx = &SPI_DATA(dev)->ctx;
	uint8_t txd, rxd;
	int queued_frames = 0;

	while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx) || queued_frames > 0) {
		bool send = false;

		/* As long as frames remain to be sent, attempt to queue them on the
		 * Tx FIFO. If the FIFO is full, another attempt is made on the next
		 * pass. If the Rx length exceeds the Tx length, queue dummy Tx frames
		 * in order to clock in the requested Rx data.
		 */
		if (spi_context_tx_buf_on(ctx)) {
			send = true;
			txd = *ctx->tx_buf;
		} else if (queued_frames == 0) {  /* Implies spi_context_rx_on(). */
			send = true;
			txd = 0U;
		}

		if (send && spi_sifive_send_available(dev)) {
			spi_sifive_send(dev, txd);
			queued_frames++;
			spi_context_update_tx(ctx, 1, 1);
		}

		if (queued_frames > 0 && spi_sifive_recv(dev, &rxd)) {
			if (spi_context_rx_buf_on(ctx)) {
				*ctx->rx_buf = rxd;
			}
			queued_frames--;
			spi_context_update_rx(ctx, 1, 1);
		}
	}

	/* Deassert the CS line */
	if (!hw_cs_control) {
		spi_context_cs_control(&SPI_DATA(dev)->ctx, false);
	} else {
		sys_write32(SF_CSMODE_OFF, SPI_REG(dev, REG_CSMODE));
	}

	spi_context_complete(ctx, dev, 0);
}
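
/*
 * Design note, inferred from the code above rather than from any
 * documentation: the transfer loop is fully polled, so every frame has
 * already moved through the FIFOs by the time spi_context_complete()
 * runs. The spi_context_wait_for_completion() call in transceive()
 * therefore does not block on hardware; it only collects the (always
 * zero) status recorded here.
 */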

/* API Functions */

static int spi_sifive_init(const struct device *dev)
{
	int err;
#ifdef CONFIG_PINCTRL
	struct spi_sifive_cfg *cfg = (struct spi_sifive_cfg *)dev->config;
#endif
	/* Disable SPI Flash mode */
	sys_clear_bit(SPI_REG(dev, REG_FCTRL), SF_FCTRL_EN);

	err = spi_context_cs_configure_all(&SPI_DATA(dev)->ctx);
	if (err < 0) {
		return err;
	}

#ifdef CONFIG_PINCTRL
	err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		return err;
	}
#endif

	/* Make sure the context is unlocked */
	spi_context_unlock_unconditionally(&SPI_DATA(dev)->ctx);
	return 0;
}

static int spi_sifive_transceive(const struct device *dev,
				 const struct spi_config *config,
				 const struct spi_buf_set *tx_bufs,
				 const struct spi_buf_set *rx_bufs)
{
	int rc = 0;
	bool hw_cs_control = false;

	/* Lock the SPI Context */
	spi_context_lock(&SPI_DATA(dev)->ctx, false, NULL, NULL, config);

	/* Configure the SPI bus */
	SPI_DATA(dev)->ctx.config = config;

	/*
	 * If the chip select configuration is not present, we'll ask the
	 * SPI peripheral itself to control the CS line
	 */
	if (!spi_cs_is_gpio(config)) {
		hw_cs_control = true;
	}
	if (!hw_cs_control) {
		/*
		 * The user has requested GPIO-driven CS, so disable the
		 * hardware CS control; the context asserts the GPIO line
		 * once the transfer is set up below.
		 */
		sys_write32(SF_CSMODE_OFF, SPI_REG(dev, REG_CSMODE));
	} else {
		/*
		 * Tell the hardware to control the requested CS pin.
		 * NOTE:
		 *	For the SPI peripheral, the pin number is not the
		 *	GPIO pin, but the index into the list of available
		 *	CS lines for the SPI peripheral.
		 */
		sys_write32(config->slave, SPI_REG(dev, REG_CSID));
		sys_write32(SF_CSMODE_OFF, SPI_REG(dev, REG_CSMODE));
	}

	rc = spi_config(dev, config->frequency, config->operation);
	if (rc < 0) {
		spi_context_release(&SPI_DATA(dev)->ctx, rc);
		return rc;
	}

	spi_context_buffers_setup(&SPI_DATA(dev)->ctx, tx_bufs, rx_bufs, 1);

	/* Assert the CS line */
	if (!hw_cs_control) {
		spi_context_cs_control(&SPI_DATA(dev)->ctx, true);
	} else {
		sys_write32(SF_CSMODE_HOLD, SPI_REG(dev, REG_CSMODE));
	}

	/* Perform transfer */
	spi_sifive_xfer(dev, hw_cs_control);

	rc = spi_context_wait_for_completion(&SPI_DATA(dev)->ctx);

	spi_context_release(&SPI_DATA(dev)->ctx, rc);

	return rc;
}

static int spi_sifive_release(const struct device *dev,
			      const struct spi_config *config)
{
	spi_context_unlock_unconditionally(&SPI_DATA(dev)->ctx);
	return 0;
}

/* Device Instantiation */

static DEVICE_API(spi, spi_sifive_api) = {
	.transceive = spi_sifive_transceive,
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = spi_sifive_release,
};

#define SPI_INIT(n)	\
	PINCTRL_DT_INST_DEFINE(n); \
	static struct spi_sifive_data spi_sifive_data_##n = { \
		SPI_CONTEXT_INIT_LOCK(spi_sifive_data_##n, ctx), \
		SPI_CONTEXT_INIT_SYNC(spi_sifive_data_##n, ctx), \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)	\
	}; \
	static struct spi_sifive_cfg spi_sifive_cfg_##n = { \
		.base = DT_INST_REG_ADDR_BY_NAME(n, control), \
		.f_sys = SIFIVE_PERIPHERAL_CLOCK_FREQUENCY, \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
	}; \
	SPI_DEVICE_DT_INST_DEFINE(n, \
			spi_sifive_init, \
			NULL, \
			&spi_sifive_data_##n, \
			&spi_sifive_cfg_##n, \
			POST_KERNEL, \
			CONFIG_SPI_INIT_PRIORITY, \
			&spi_sifive_api);

DT_INST_FOREACH_STATUS_OKAY(SPI_INIT)
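
/*
 * For illustration only: a devicetree node this driver binds to looks
 * roughly like the sketch below. The unit address and register size are
 * made-up placeholders, not values from any particular board; the
 * "control" entry in reg-names is the one requirement the SPI_INIT()
 * macro above relies on via DT_INST_REG_ADDR_BY_NAME(n, control).
 *
 *	spi0: spi@10014000 {
 *		compatible = "sifive,spi0";
 *		reg = <0x10014000 0x1000>;
 *		reg-names = "control";
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *	};
 */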