// SPDX-License-Identifier: GPL-2.0
//
// Synquacer HSSPI controller driver
//
// Copyright (c) 2015-2018 Socionext Inc.
// Copyright (c) 2018-2019 Linaro Ltd.
//

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
#include <linux/clk.h>

/* HSSPI register address definitions */
#define SYNQUACER_HSSPI_REG_MCTRL	0x00
#define SYNQUACER_HSSPI_REG_PCC0	0x04
#define SYNQUACER_HSSPI_REG_PCC(n)	(SYNQUACER_HSSPI_REG_PCC0 + (n) * 4)
#define SYNQUACER_HSSPI_REG_TXF		0x14
#define SYNQUACER_HSSPI_REG_TXE		0x18
#define SYNQUACER_HSSPI_REG_TXC		0x1C
#define SYNQUACER_HSSPI_REG_RXF		0x20
#define SYNQUACER_HSSPI_REG_RXE		0x24
#define SYNQUACER_HSSPI_REG_RXC		0x28
#define SYNQUACER_HSSPI_REG_FAULTF	0x2C
#define SYNQUACER_HSSPI_REG_FAULTC	0x30
#define SYNQUACER_HSSPI_REG_DMCFG	0x34
#define SYNQUACER_HSSPI_REG_DMSTART	0x38
#define SYNQUACER_HSSPI_REG_DMBCC	0x3C
#define SYNQUACER_HSSPI_REG_DMSTATUS	0x40
#define SYNQUACER_HSSPI_REG_FIFOCFG	0x4C
#define SYNQUACER_HSSPI_REG_TX_FIFO	0x50
#define SYNQUACER_HSSPI_REG_RX_FIFO	0x90
#define SYNQUACER_HSSPI_REG_MID		0xFC

/* HSSPI register bit definitions */
#define SYNQUACER_HSSPI_MCTRL_MEN			BIT(0)
#define SYNQUACER_HSSPI_MCTRL_COMMAND_SEQUENCE_EN	BIT(1)
#define SYNQUACER_HSSPI_MCTRL_CDSS			BIT(3)
#define SYNQUACER_HSSPI_MCTRL_MES			BIT(4)
#define SYNQUACER_HSSPI_MCTRL_SYNCON			BIT(5)

#define SYNQUACER_HSSPI_PCC_CPHA		BIT(0)
#define SYNQUACER_HSSPI_PCC_CPOL		BIT(1)
#define SYNQUACER_HSSPI_PCC_ACES		BIT(2)
#define SYNQUACER_HSSPI_PCC_RTM			BIT(3)
#define SYNQUACER_HSSPI_PCC_SSPOL		BIT(4)
#define SYNQUACER_HSSPI_PCC_SDIR		BIT(7)
#define SYNQUACER_HSSPI_PCC_SENDIAN		BIT(8)
#define SYNQUACER_HSSPI_PCC_SAFESYNC		BIT(16)
#define SYNQUACER_HSSPI_PCC_SS2CD_SHIFT		5U
#define SYNQUACER_HSSPI_PCC_CDRS_MASK		0x7f
#define SYNQUACER_HSSPI_PCC_CDRS_SHIFT		9U

#define SYNQUACER_HSSPI_TXF_FIFO_FULL		BIT(0)
#define SYNQUACER_HSSPI_TXF_FIFO_EMPTY		BIT(1)
#define SYNQUACER_HSSPI_TXF_SLAVE_RELEASED	BIT(6)

#define SYNQUACER_HSSPI_TXE_FIFO_FULL		BIT(0)
#define SYNQUACER_HSSPI_TXE_FIFO_EMPTY		BIT(1)
#define SYNQUACER_HSSPI_TXE_SLAVE_RELEASED	BIT(6)

#define SYNQUACER_HSSPI_RXF_FIFO_MORE_THAN_THRESHOLD		BIT(5)
#define SYNQUACER_HSSPI_RXF_SLAVE_RELEASED			BIT(6)

#define SYNQUACER_HSSPI_RXE_FIFO_MORE_THAN_THRESHOLD		BIT(5)
#define SYNQUACER_HSSPI_RXE_SLAVE_RELEASED			BIT(6)

#define SYNQUACER_HSSPI_DMCFG_SSDC		BIT(1)
#define SYNQUACER_HSSPI_DMCFG_MSTARTEN		BIT(2)

#define SYNQUACER_HSSPI_DMSTART_START		BIT(0)
#define SYNQUACER_HSSPI_DMSTOP_STOP		BIT(8)
#define SYNQUACER_HSSPI_DMPSEL_CS_MASK		0x3
#define SYNQUACER_HSSPI_DMPSEL_CS_SHIFT		16U
#define SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT	24U
#define SYNQUACER_HSSPI_DMTRP_DATA_MASK		0x3
#define SYNQUACER_HSSPI_DMTRP_DATA_SHIFT	26U
#define SYNQUACER_HSSPI_DMTRP_DATA_TXRX		0
#define SYNQUACER_HSSPI_DMTRP_DATA_RX		1
#define SYNQUACER_HSSPI_DMTRP_DATA_TX		2

#define SYNQUACER_HSSPI_DMSTATUS_RX_DATA_MASK	0x1f
#define SYNQUACER_HSSPI_DMSTATUS_RX_DATA_SHIFT	8U
#define SYNQUACER_HSSPI_DMSTATUS_TX_DATA_MASK	0x1f
#define SYNQUACER_HSSPI_DMSTATUS_TX_DATA_SHIFT	16U

#define SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_MASK	0xf
#define SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT	0U
#define SYNQUACER_HSSPI_FIFOCFG_TX_THRESHOLD_MASK	0xf
#define SYNQUACER_HSSPI_FIFOCFG_TX_THRESHOLD_SHIFT	4U
#define SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_MASK		0x3
#define SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT	8U
#define SYNQUACER_HSSPI_FIFOCFG_RX_FLUSH		BIT(11)
#define SYNQUACER_HSSPI_FIFOCFG_TX_FLUSH		BIT(12)

#define SYNQUACER_HSSPI_FIFO_DEPTH		16U
#define SYNQUACER_HSSPI_FIFO_TX_THRESHOLD	4U
#define SYNQUACER_HSSPI_FIFO_RX_THRESHOLD \
	(SYNQUACER_HSSPI_FIFO_DEPTH - SYNQUACER_HSSPI_FIFO_TX_THRESHOLD)

#define SYNQUACER_HSSPI_TRANSFER_MODE_TX	BIT(1)
#define SYNQUACER_HSSPI_TRANSFER_MODE_RX	BIT(2)
#define SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC	2000U
#define SYNQUACER_HSSPI_ENABLE_TMOUT_MSEC	1000U

#define SYNQUACER_HSSPI_CLOCK_SRC_IHCLK		0
#define SYNQUACER_HSSPI_CLOCK_SRC_IPCLK		1

#define SYNQUACER_HSSPI_NUM_CHIP_SELECT		4U
#define SYNQUACER_HSSPI_IRQ_NAME_MAX		32U

struct synquacer_spi {
	struct device *dev;
	struct completion transfer_done;
	unsigned int cs;
	unsigned int bpw;
	unsigned int mode;
	unsigned int speed;
	bool aces, rtm;
	void *rx_buf;
	const void *tx_buf;
	struct clk *clk;
	int clk_src_type;
	void __iomem *regs;
	u32 tx_words, rx_words;
	unsigned int bus_width;
	unsigned int transfer_mode;
	char rx_irq_name[SYNQUACER_HSSPI_IRQ_NAME_MAX];
	char tx_irq_name[SYNQUACER_HSSPI_IRQ_NAME_MAX];
};

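/*
 * Drain the RX FIFO into sspi->rx_buf: read the number of words that
 * DMSTATUS reports as available, capped at what the current transfer
 * still expects.
 */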
static int read_fifo(struct synquacer_spi *sspi)
{
	u32 len = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTATUS);

	len = (len >> SYNQUACER_HSSPI_DMSTATUS_RX_DATA_SHIFT) &
	       SYNQUACER_HSSPI_DMSTATUS_RX_DATA_MASK;
	len = min(len, sspi->rx_words);

	switch (sspi->bpw) {
	case 8: {
		u8 *buf = sspi->rx_buf;

		ioread8_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
			    buf, len);
		sspi->rx_buf = buf + len;
		break;
	}
	case 16: {
		u16 *buf = sspi->rx_buf;

		ioread16_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
			     buf, len);
		sspi->rx_buf = buf + len;
		break;
	}
	case 24:
		/* fallthrough, should use 32-bit access */
	case 32: {
		u32 *buf = sspi->rx_buf;

		ioread32_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
			     buf, len);
		sspi->rx_buf = buf + len;
		break;
	}
	default:
		return -EINVAL;
	}

	sspi->rx_words -= len;
	return 0;
}

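/*
 * Fill the TX FIFO from sspi->tx_buf: DMSTATUS reports how many words are
 * already queued, so write at most the remaining FIFO space, capped at the
 * remaining transfer length.
 */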
static int write_fifo(struct synquacer_spi *sspi)
{
	u32 len = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTATUS);

	len = (len >> SYNQUACER_HSSPI_DMSTATUS_TX_DATA_SHIFT) &
	       SYNQUACER_HSSPI_DMSTATUS_TX_DATA_MASK;
	len = min(SYNQUACER_HSSPI_FIFO_DEPTH - len,
		    sspi->tx_words);

	switch (sspi->bpw) {
	case 8: {
		const u8 *buf = sspi->tx_buf;

		iowrite8_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
			     buf, len);
		sspi->tx_buf = buf + len;
		break;
	}
	case 16: {
		const u16 *buf = sspi->tx_buf;

		iowrite16_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
			      buf, len);
		sspi->tx_buf = buf + len;
		break;
	}
	case 24:
		/* fallthrough, should use 32-bit access */
	case 32: {
		const u32 *buf = sspi->tx_buf;

		iowrite32_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
			      buf, len);
		sspi->tx_buf = buf + len;
		break;
	}
	default:
		return -EINVAL;
	}

	sspi->tx_words -= len;
	return 0;
}

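/*
 * Program the per-chip-select PCC register, FIFO width and DMTRP transfer
 * direction/bus width for the upcoming transfer. Returns early when nothing
 * has changed since the previous transfer.
 */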
static int synquacer_spi_config(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct synquacer_spi *sspi = spi_master_get_devdata(master);
	unsigned int speed, mode, bpw, cs, bus_width, transfer_mode;
	u32 rate, val, div;

	/* Full Duplex only on 1-bit wide bus */
	if (xfer->rx_buf && xfer->tx_buf &&
	    (xfer->rx_nbits != 1 || xfer->tx_nbits != 1)) {
		dev_err(sspi->dev,
			"RX and TX bus widths must be 1-bit for Full-Duplex!\n");
		return -EINVAL;
	}

	if (xfer->tx_buf) {
		bus_width = xfer->tx_nbits;
		transfer_mode = SYNQUACER_HSSPI_TRANSFER_MODE_TX;
	} else {
		bus_width = xfer->rx_nbits;
		transfer_mode = SYNQUACER_HSSPI_TRANSFER_MODE_RX;
	}

	mode = spi->mode;
	cs = spi_get_chipselect(spi, 0);
	speed = xfer->speed_hz;
	bpw = xfer->bits_per_word;

	/* return if nothing to change */
	if (speed == sspi->speed &&
		bus_width == sspi->bus_width && bpw == sspi->bpw &&
		mode == sspi->mode && cs == sspi->cs &&
		transfer_mode == sspi->transfer_mode) {
		return 0;
	}

	sspi->transfer_mode = transfer_mode;
	rate = master->max_speed_hz;

	div = DIV_ROUND_UP(rate, speed);
	if (div > 254) {
		dev_err(sspi->dev, "Requested rate too low (%u)\n",
			speed);
		return -EINVAL;
	}

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_PCC(cs));
	val &= ~SYNQUACER_HSSPI_PCC_SAFESYNC;
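	/* Set SAFESYNC only for dual/quad transfers with a small divisor */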
	if (bpw == 8 && (mode & (SPI_TX_DUAL | SPI_RX_DUAL)) && div < 3)
		val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
	if (bpw == 8 && (mode & (SPI_TX_QUAD | SPI_RX_QUAD)) && div < 6)
		val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
	if (bpw == 16 && (mode & (SPI_TX_QUAD | SPI_RX_QUAD)) && div < 3)
		val |= SYNQUACER_HSSPI_PCC_SAFESYNC;

	if (mode & SPI_CPHA)
		val |= SYNQUACER_HSSPI_PCC_CPHA;
	else
		val &= ~SYNQUACER_HSSPI_PCC_CPHA;

	if (mode & SPI_CPOL)
		val |= SYNQUACER_HSSPI_PCC_CPOL;
	else
		val &= ~SYNQUACER_HSSPI_PCC_CPOL;

	if (mode & SPI_CS_HIGH)
		val |= SYNQUACER_HSSPI_PCC_SSPOL;
	else
		val &= ~SYNQUACER_HSSPI_PCC_SSPOL;

	if (mode & SPI_LSB_FIRST)
		val |= SYNQUACER_HSSPI_PCC_SDIR;
	else
		val &= ~SYNQUACER_HSSPI_PCC_SDIR;

	if (sspi->aces)
		val |= SYNQUACER_HSSPI_PCC_ACES;
	else
		val &= ~SYNQUACER_HSSPI_PCC_ACES;

	if (sspi->rtm)
		val |= SYNQUACER_HSSPI_PCC_RTM;
	else
		val &= ~SYNQUACER_HSSPI_PCC_RTM;

	val |= (3 << SYNQUACER_HSSPI_PCC_SS2CD_SHIFT);
	val |= SYNQUACER_HSSPI_PCC_SENDIAN;

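	/* CDRS is programmed with half the computed divisor */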
	val &= ~(SYNQUACER_HSSPI_PCC_CDRS_MASK <<
		 SYNQUACER_HSSPI_PCC_CDRS_SHIFT);
	val |= ((div >> 1) << SYNQUACER_HSSPI_PCC_CDRS_SHIFT);

	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_PCC(cs));

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
	val &= ~(SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_MASK <<
		 SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT);
	val |= ((bpw / 8 - 1) << SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT);
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
	val &= ~(SYNQUACER_HSSPI_DMTRP_DATA_MASK <<
		 SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);

	if (xfer->rx_buf)
		val |= (SYNQUACER_HSSPI_DMTRP_DATA_RX <<
			SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);
	else
		val |= (SYNQUACER_HSSPI_DMTRP_DATA_TX <<
			SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);

	val &= ~(3 << SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT);
	val |= ((bus_width >> 1) << SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT);
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);

	sspi->bpw = bpw;
	sspi->mode = mode;
	sspi->speed = speed;
	sspi->cs = spi_get_chipselect(spi, 0);
	sspi->bus_width = bus_width;

	return 0;
}

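/*
 * Execute a single transfer: flush the FIFOs, reconfigure the controller if
 * needed, prime the TX FIFO, arm the relevant interrupts, wait for
 * completion and finally drain any residual RX words.
 */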
static int synquacer_spi_transfer_one(struct spi_master *master,
				      struct spi_device *spi,
				      struct spi_transfer *xfer)
{
	struct synquacer_spi *sspi = spi_master_get_devdata(master);
	int ret;
	int status = 0;
	u32 words;
	u8 bpw;
	u32 val;

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
	val &= ~SYNQUACER_HSSPI_DMSTOP_STOP;
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
	val |= SYNQUACER_HSSPI_FIFOCFG_RX_FLUSH;
	val |= SYNQUACER_HSSPI_FIFOCFG_TX_FLUSH;
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);

	/*
	 * See if we can transfer 4 bytes as one word
	 * to maximize the FIFO buffer efficiency.
	 */
	bpw = xfer->bits_per_word;
	if (bpw == 8 && !(xfer->len % 4) && !(spi->mode & SPI_LSB_FIRST))
		xfer->bits_per_word = 32;

	ret = synquacer_spi_config(master, spi, xfer);

	/* restore */
	xfer->bits_per_word = bpw;

	if (ret)
		return ret;

	reinit_completion(&sspi->transfer_done);

	sspi->tx_buf = xfer->tx_buf;
	sspi->rx_buf = xfer->rx_buf;

	switch (sspi->bpw) {
	case 8:
		words = xfer->len;
		break;
	case 16:
		words = xfer->len / 2;
		break;
	case 24:
		/* fallthrough, should use 32-bit access */
	case 32:
		words = xfer->len / 4;
		break;
	default:
		dev_err(sspi->dev, "unsupported bpw: %d\n", sspi->bpw);
		return -EINVAL;
	}

	if (xfer->tx_buf)
		sspi->tx_words = words;
	else
		sspi->tx_words = 0;

	if (xfer->rx_buf)
		sspi->rx_words = words;
	else
		sspi->rx_words = 0;

	if (xfer->tx_buf) {
		status = write_fifo(sspi);
		if (status < 0) {
			dev_err(sspi->dev, "write_fifo() failed: %d\n",
				status);
			return status;
		}
	}

	if (xfer->rx_buf) {
		val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
		val &= ~(SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_MASK <<
			 SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT);
		val |= ((sspi->rx_words > SYNQUACER_HSSPI_FIFO_DEPTH ?
			SYNQUACER_HSSPI_FIFO_RX_THRESHOLD : sspi->rx_words) <<
			SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT);
		writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
	}

	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_TXC);
	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_RXC);

	/* Trigger */
	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
	val |= SYNQUACER_HSSPI_DMSTART_START;
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);

	if (xfer->tx_buf) {
		val = SYNQUACER_HSSPI_TXE_FIFO_EMPTY;
		writel(val, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
		status = wait_for_completion_timeout(&sspi->transfer_done,
			msecs_to_jiffies(SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC));
		writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
	}

	if (xfer->rx_buf) {
		u32 buf[SYNQUACER_HSSPI_FIFO_DEPTH];

		val = SYNQUACER_HSSPI_RXE_FIFO_MORE_THAN_THRESHOLD |
		      SYNQUACER_HSSPI_RXE_SLAVE_RELEASED;
		writel(val, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
		status = wait_for_completion_timeout(&sspi->transfer_done,
			msecs_to_jiffies(SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC));
		writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);

		/* stop RX and clean RXFIFO */
		val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
		val |= SYNQUACER_HSSPI_DMSTOP_STOP;
		writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
		sspi->rx_buf = buf;
		sspi->rx_words = SYNQUACER_HSSPI_FIFO_DEPTH;
		read_fifo(sspi);
	}

	if (status == 0) {
		dev_err(sspi->dev, "failed to transfer. Timeout.\n");
		return -ETIMEDOUT;
	}

	return 0;
}

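/*
 * Chip select is driven through the DMPSEL field of DMSTART; deasserting
 * CS is done by setting the DMSTOP bit in the same register.
 */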
static void synquacer_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct synquacer_spi *sspi = spi_master_get_devdata(spi->master);
	u32 val;

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
	val &= ~(SYNQUACER_HSSPI_DMPSEL_CS_MASK <<
		 SYNQUACER_HSSPI_DMPSEL_CS_SHIFT);
	val |= spi_get_chipselect(spi, 0) << SYNQUACER_HSSPI_DMPSEL_CS_SHIFT;

	if (!enable)
		val |= SYNQUACER_HSSPI_DMSTOP_STOP;

	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
}

static int synquacer_spi_wait_status_update(struct synquacer_spi *sspi,
					    bool enable)
{
	u32 val;
	unsigned long timeout = jiffies +
		msecs_to_jiffies(SYNQUACER_HSSPI_ENABLE_TMOUT_MSEC);

	/* wait for MES (Module Enable Status) to be updated */
	do {
		val = readl(sspi->regs + SYNQUACER_HSSPI_REG_MCTRL) &
		      SYNQUACER_HSSPI_MCTRL_MES;
		if (enable && val)
			return 0;
		if (!enable && !val)
			return 0;
	} while (time_before(jiffies, timeout));

	dev_err(sspi->dev, "timeout waiting for Module Enable Status update\n");
	return -EBUSY;
}

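/*
 * Reset and (re)enable the controller: clear interrupts and faults, select
 * the clock source, disable command sequence mode and turn the module on.
 */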
static int synquacer_spi_enable(struct spi_master *master)
{
	u32 val;
	int status;
	struct synquacer_spi *sspi = spi_master_get_devdata(master);

	/* Disable module */
	writel(0, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
	status = synquacer_spi_wait_status_update(sspi, false);
	if (status < 0)
		return status;

	writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
	writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_TXC);
	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_RXC);
	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_FAULTC);

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMCFG);
	val &= ~SYNQUACER_HSSPI_DMCFG_SSDC;
	val &= ~SYNQUACER_HSSPI_DMCFG_MSTARTEN;
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMCFG);

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
	if (sspi->clk_src_type == SYNQUACER_HSSPI_CLOCK_SRC_IPCLK)
		val |= SYNQUACER_HSSPI_MCTRL_CDSS;
	else
		val &= ~SYNQUACER_HSSPI_MCTRL_CDSS;

	val &= ~SYNQUACER_HSSPI_MCTRL_COMMAND_SEQUENCE_EN;
	val |= SYNQUACER_HSSPI_MCTRL_MEN;
	val |= SYNQUACER_HSSPI_MCTRL_SYNCON;

	/* Enable module */
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
	status = synquacer_spi_wait_status_update(sspi, true);
	if (status < 0)
		return status;

	return 0;
}

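/*
 * RX interrupt: drain the FIFO when it crosses the threshold or the slave
 * releases the bus, and signal completion once all expected words are read.
 */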
static irqreturn_t sq_spi_rx_handler(int irq, void *priv)
{
	uint32_t val;
	struct synquacer_spi *sspi = priv;

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_RXF);
	if ((val & SYNQUACER_HSSPI_RXF_SLAVE_RELEASED) ||
	    (val & SYNQUACER_HSSPI_RXF_FIFO_MORE_THAN_THRESHOLD)) {
		read_fifo(sspi);

		if (sspi->rx_words == 0) {
			writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
			complete(&sspi->transfer_done);
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

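/*
 * TX interrupt: refill the FIFO while data remains, otherwise mask the
 * interrupt and signal completion.
 */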
static irqreturn_t sq_spi_tx_handler(int irq, void *priv)
{
	uint32_t val;
	struct synquacer_spi *sspi = priv;

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_TXF);
	if (val & SYNQUACER_HSSPI_TXF_FIFO_EMPTY) {
		if (sspi->tx_words == 0) {
			writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
			complete(&sspi->transfer_done);
		} else {
			write_fifo(sspi);
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int synquacer_spi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct spi_master *master;
	struct synquacer_spi *sspi;
	int ret;
	int rx_irq, tx_irq;

	master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	sspi = spi_master_get_devdata(master);
	sspi->dev = &pdev->dev;

	init_completion(&sspi->transfer_done);

	sspi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sspi->regs)) {
		ret = PTR_ERR(sspi->regs);
		goto put_spi;
	}

	sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK; /* Default */
	device_property_read_u32(&pdev->dev, "socionext,ihclk-rate",
				 &master->max_speed_hz); /* for ACPI */

	if (dev_of_node(&pdev->dev)) {
		if (device_property_match_string(&pdev->dev,
					 "clock-names", "iHCLK") >= 0) {
			sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK;
			sspi->clk = devm_clk_get(sspi->dev, "iHCLK");
		} else if (device_property_match_string(&pdev->dev,
						"clock-names", "iPCLK") >= 0) {
			sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IPCLK;
			sspi->clk = devm_clk_get(sspi->dev, "iPCLK");
		} else {
			dev_err(&pdev->dev, "invalid clock source specified\n");
			ret = -EINVAL;
			goto put_spi;
		}

		if (IS_ERR(sspi->clk)) {
			ret = dev_err_probe(&pdev->dev, PTR_ERR(sspi->clk),
					    "clock not found\n");
			goto put_spi;
		}

		ret = clk_prepare_enable(sspi->clk);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable clock (%d)\n",
				ret);
			goto put_spi;
		}

		master->max_speed_hz = clk_get_rate(sspi->clk);
	}

	if (!master->max_speed_hz) {
		dev_err(&pdev->dev, "missing clock source\n");
		ret = -EINVAL;
		goto disable_clk;
	}
	master->min_speed_hz = master->max_speed_hz / 254;

	sspi->aces = device_property_read_bool(&pdev->dev,
					       "socionext,set-aces");
	sspi->rtm = device_property_read_bool(&pdev->dev, "socionext,use-rtm");

	master->num_chipselect = SYNQUACER_HSSPI_NUM_CHIP_SELECT;

	rx_irq = platform_get_irq(pdev, 0);
	if (rx_irq <= 0) {
		ret = rx_irq;
		goto disable_clk;
	}
	snprintf(sspi->rx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-rx",
		 dev_name(&pdev->dev));
	ret = devm_request_irq(&pdev->dev, rx_irq, sq_spi_rx_handler,
				0, sspi->rx_irq_name, sspi);
	if (ret) {
		dev_err(&pdev->dev, "request rx_irq failed (%d)\n", ret);
		goto disable_clk;
	}

	tx_irq = platform_get_irq(pdev, 1);
	if (tx_irq <= 0) {
		ret = tx_irq;
		goto disable_clk;
	}
	snprintf(sspi->tx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-tx",
		 dev_name(&pdev->dev));
	ret = devm_request_irq(&pdev->dev, tx_irq, sq_spi_tx_handler,
				0, sspi->tx_irq_name, sspi);
	if (ret) {
		dev_err(&pdev->dev, "request tx_irq failed (%d)\n", ret);
		goto disable_clk;
	}

	master->dev.of_node = np;
	master->dev.fwnode = pdev->dev.fwnode;
	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;

	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL |
			    SPI_TX_QUAD | SPI_RX_QUAD;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) |
				     SPI_BPW_MASK(16) | SPI_BPW_MASK(8);

	master->set_cs = synquacer_spi_set_cs;
	master->transfer_one = synquacer_spi_transfer_one;

	ret = synquacer_spi_enable(master);
	if (ret)
		goto disable_clk;

	pm_runtime_set_active(sspi->dev);
	pm_runtime_enable(sspi->dev);

	ret = devm_spi_register_master(sspi->dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(sspi->dev);
disable_clk:
	clk_disable_unprepare(sspi->clk);
put_spi:
	spi_master_put(master);

	return ret;
}

static void synquacer_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct synquacer_spi *sspi = spi_master_get_devdata(master);

	pm_runtime_disable(sspi->dev);

	clk_disable_unprepare(sspi->clk);
}

static int __maybe_unused synquacer_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct synquacer_spi *sspi = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(sspi->clk);

	return ret;
}

static int __maybe_unused synquacer_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct synquacer_spi *sspi = spi_master_get_devdata(master);
	int ret;

	if (!pm_runtime_suspended(dev)) {
		/* Ensure reconfigure during next xfer */
		sspi->speed = 0;

		ret = clk_prepare_enable(sspi->clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable clk (%d)\n",
				ret);
			return ret;
		}

		ret = synquacer_spi_enable(master);
		if (ret) {
			clk_disable_unprepare(sspi->clk);
			dev_err(dev, "failed to enable spi (%d)\n", ret);
			return ret;
		}
	}

	ret = spi_master_resume(master);
	if (ret < 0)
		clk_disable_unprepare(sspi->clk);

	return ret;
}

static SIMPLE_DEV_PM_OPS(synquacer_spi_pm_ops, synquacer_spi_suspend,
			 synquacer_spi_resume);

static const struct of_device_id synquacer_spi_of_match[] = {
	{.compatible = "socionext,synquacer-spi"},
	{}
};
MODULE_DEVICE_TABLE(of, synquacer_spi_of_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id synquacer_hsspi_acpi_ids[] = {
	{ "SCX0004" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(acpi, synquacer_hsspi_acpi_ids);
#endif

static struct platform_driver synquacer_spi_driver = {
	.driver = {
		.name = "synquacer-spi",
		.pm = &synquacer_spi_pm_ops,
		.of_match_table = synquacer_spi_of_match,
		.acpi_match_table = ACPI_PTR(synquacer_hsspi_acpi_ids),
	},
	.probe = synquacer_spi_probe,
	.remove_new = synquacer_spi_remove,
};
module_platform_driver(synquacer_spi_driver);

MODULE_DESCRIPTION("Socionext Synquacer HS-SPI controller driver");
MODULE_AUTHOR("Masahisa Kojima <masahisa.kojima@linaro.org>");
MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
MODULE_LICENSE("GPL v2");