// SPDX-License-Identifier: (GPL-2.0)
/*
 * Microchip coreQSPI QSPI controller driver
 *
 * Copyright (C) 2018-2022 Microchip Technology Inc. and its subsidiaries
 *
 * Author: Naga Sureshkumar Relli <nagasuresh.relli@microchip.com>
 *
 */

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

/*
 * QSPI Control register mask defines
 */
#define CONTROL_ENABLE		BIT(0)
#define CONTROL_MASTER		BIT(1)
#define CONTROL_XIP		BIT(2)
#define CONTROL_XIPADDR		BIT(3)
#define CONTROL_CLKIDLE		BIT(10)
#define CONTROL_SAMPLE_MASK	GENMASK(12, 11)
#define CONTROL_MODE0		BIT(13)
#define CONTROL_MODE12_MASK	GENMASK(15, 14)
#define CONTROL_MODE12_EX_RO	BIT(14)
#define CONTROL_MODE12_EX_RW	BIT(15)
#define CONTROL_MODE12_FULL	GENMASK(15, 14)
#define CONTROL_FLAGSX4		BIT(16)
#define CONTROL_CLKRATE_MASK	GENMASK(27, 24)
#define CONTROL_CLKRATE_SHIFT	24

/*
 * QSPI Frames register mask defines
 */
#define FRAMES_TOTALBYTES_MASK	GENMASK(15, 0)
#define FRAMES_CMDBYTES_MASK	GENMASK(24, 16)
#define FRAMES_CMDBYTES_SHIFT	16
#define FRAMES_SHIFT		25
#define FRAMES_IDLE_MASK	GENMASK(29, 26)
#define FRAMES_IDLE_SHIFT	26
#define FRAMES_FLAGBYTE		BIT(30)
#define FRAMES_FLAGWORD		BIT(31)

/*
 * QSPI Interrupt Enable register mask defines
 */
#define IEN_TXDONE		BIT(0)
#define IEN_RXDONE		BIT(1)
#define IEN_RXAVAILABLE		BIT(2)
#define IEN_TXAVAILABLE		BIT(3)
#define IEN_RXFIFOEMPTY		BIT(4)
#define IEN_TXFIFOFULL		BIT(5)

/*
 * QSPI Status register mask defines
 */
#define STATUS_TXDONE		BIT(0)
#define STATUS_RXDONE		BIT(1)
#define STATUS_RXAVAILABLE	BIT(2)
#define STATUS_TXAVAILABLE	BIT(3)
#define STATUS_RXFIFOEMPTY	BIT(4)
#define STATUS_TXFIFOFULL	BIT(5)
#define STATUS_READY		BIT(7)
#define STATUS_FLAGSX4		BIT(8)
#define STATUS_MASK		GENMASK(8, 0)

#define BYTESUPPER_MASK		GENMASK(31, 16)
#define BYTESLOWER_MASK		GENMASK(15, 0)

#define MAX_DIVIDER		16
#define MIN_DIVIDER		0
#define MAX_DATA_CMD_LEN	256

/* QSPI ready time out value */
#define TIMEOUT_MS		500

/*
 * QSPI Register offsets.
 */
#define REG_CONTROL		(0x00)
#define REG_FRAMES		(0x04)
#define REG_IEN			(0x0c)
#define REG_STATUS		(0x10)
#define REG_DIRECT_ACCESS	(0x14)
#define REG_UPPER_ACCESS	(0x18)
#define REG_RX_DATA		(0x40)
#define REG_TX_DATA		(0x44)
#define REG_X4_RX_DATA		(0x48)
#define REG_X4_TX_DATA		(0x4c)
#define REG_FRAMESUP		(0x50)

/**
 * struct mchp_coreqspi - Defines qspi driver instance
 * @regs:              Virtual address of the QSPI controller registers
 * @clk:               QSPI Operating clock
 * @data_completion:   completion structure
 * @op_lock:           lock access to the device
 * @txbuf:             TX buffer
 * @rxbuf:             RX buffer
 * @irq:               IRQ number
 * @tx_len:            Number of bytes left to transfer
 * @rx_len:            Number of bytes left to receive
 */
struct mchp_coreqspi {
	void __iomem *regs;
	struct clk *clk;
	struct completion data_completion;
	struct mutex op_lock; /* lock access to the device */
	u8 *txbuf;
	u8 *rxbuf;
	int irq;
	int tx_len;
	int rx_len;
};

static int mchp_coreqspi_set_mode(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
{
	u32 control = readl_relaxed(qspi->regs + REG_CONTROL);

	/*
	 * The operating mode can be configured based on the command that needs to be sent.
	 * bits[15:14]: Sets whether multiple bit SPI operates in normal, extended or full modes.
	 *		00: Normal (single DQ0 TX and single DQ1 RX lines)
	 *		01: Extended RO (command and address bytes on DQ0 only)
	 *		10: Extended RW (command byte on DQ0 only)
	 *		11: Full (command and address bytes on all DQ lines)
	 * bit[13]:	Sets whether multiple bit SPI uses 2 or 4 bits of data
	 *		0: 2-bits (BSPI)
	 *		1: 4-bits (QSPI)
	 */
	if (op->data.buswidth == 4 || op->data.buswidth == 2) {
		control &= ~CONTROL_MODE12_MASK;
		if (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))
			control |= CONTROL_MODE12_EX_RO;
		else if (op->cmd.buswidth == 1)
			control |= CONTROL_MODE12_EX_RW;
		else
			control |= CONTROL_MODE12_FULL;

		control |= CONTROL_MODE0;
	} else {
		control &= ~(CONTROL_MODE12_MASK |
			     CONTROL_MODE0);
	}

	writel_relaxed(control, qspi->regs + REG_CONTROL);

	return 0;
}

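/*
 * Called from the RXAVAILABLE interrupt path to drain the RX FIFO into
 * qspi->rxbuf, four bytes at a time through the X4 data register and
 * then byte wise for any remainder.
 */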
static inline void mchp_coreqspi_read_op(struct mchp_coreqspi *qspi)
{
	u32 control, data;

	if (!qspi->rx_len)
		return;

	control = readl_relaxed(qspi->regs + REG_CONTROL);

	/*
	 * Read 4 bytes from the SPI FIFO in a single transaction and then read
	 * the remaining data byte wise.
	 */
	control |= CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->rx_len >= 4) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
			;
		data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
		*(u32 *)qspi->rxbuf = data;
		qspi->rxbuf += 4;
		qspi->rx_len -= 4;
	}

	control &= ~CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->rx_len--) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
			;
		data = readl_relaxed(qspi->regs + REG_RX_DATA);
		*qspi->rxbuf++ = (data & 0xFF);
	}
}

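/*
 * Push qspi->txbuf into the TX FIFO, four bytes at a time through the
 * X4 data register and then byte wise for any remainder. Used for the
 * opcode, address and TX data phases of an operation.
 */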
static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi, bool word)
{
	u32 control, data;

	control = readl_relaxed(qspi->regs + REG_CONTROL);
	control |= CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->tx_len >= 4) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
			;
		data = *(u32 *)qspi->txbuf;
		qspi->txbuf += 4;
		qspi->tx_len -= 4;
		writel_relaxed(data, qspi->regs + REG_X4_TX_DATA);
	}

	control &= ~CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->tx_len--) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
			;
		data = *qspi->txbuf++;
		writel_relaxed(data, qspi->regs + REG_TX_DATA);
	}
}

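/* Enable the interrupts that drive a transfer: TX done, RX done and RX data available. */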
static void mchp_coreqspi_enable_ints(struct mchp_coreqspi *qspi)
{
	u32 mask = IEN_TXDONE |
		   IEN_RXDONE |
		   IEN_RXAVAILABLE;

	writel_relaxed(mask, qspi->regs + REG_IEN);
}

static void mchp_coreqspi_disable_ints(struct mchp_coreqspi *qspi)
{
	writel_relaxed(0, qspi->regs + REG_IEN);
}

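/*
 * Interrupt handler: acknowledge TX done, drain the RX FIFO whenever
 * receive data is available and signal completion once RX is done.
 */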
static irqreturn_t mchp_coreqspi_isr(int irq, void *dev_id)
{
	struct mchp_coreqspi *qspi = (struct mchp_coreqspi *)dev_id;
	irqreturn_t ret = IRQ_NONE;
	int intfield = readl_relaxed(qspi->regs + REG_STATUS) & STATUS_MASK;

	if (intfield == 0)
		return ret;

	if (intfield & IEN_TXDONE) {
		writel_relaxed(IEN_TXDONE, qspi->regs + REG_STATUS);
		ret = IRQ_HANDLED;
	}

	if (intfield & IEN_RXAVAILABLE) {
		writel_relaxed(IEN_RXAVAILABLE, qspi->regs + REG_STATUS);
		mchp_coreqspi_read_op(qspi);
		ret = IRQ_HANDLED;
	}

	if (intfield & IEN_RXDONE) {
		writel_relaxed(IEN_RXDONE, qspi->regs + REG_STATUS);
		complete(&qspi->data_completion);
		ret = IRQ_HANDLED;
	}

	return ret;
}

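/*
 * Program the clock divider and idle polarity for the addressed device.
 * The divider is derived from the peripheral clock and the device's
 * maximum speed, giving an SPI clock of clk_hz / (2 * baud_rate_val).
 */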
static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi)
{
	unsigned long clk_hz;
	u32 control, baud_rate_val = 0;

	clk_hz = clk_get_rate(qspi->clk);
	if (!clk_hz)
		return -EINVAL;

	baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * spi->max_speed_hz);
	if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER) {
		dev_err(&spi->dev,
			"could not configure the clock for spi clock %d Hz & system clock %ld Hz\n",
			spi->max_speed_hz, clk_hz);
		return -EINVAL;
	}

	control = readl_relaxed(qspi->regs + REG_CONTROL);
	control |= baud_rate_val << CONTROL_CLKRATE_SHIFT;
	writel_relaxed(control, qspi->regs + REG_CONTROL);
	control = readl_relaxed(qspi->regs + REG_CONTROL);

	if ((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA))
		control |= CONTROL_CLKIDLE;
	else
		control &= ~CONTROL_CLKIDLE;

	writel_relaxed(control, qspi->regs + REG_CONTROL);

	return 0;
}

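/*
 * ->setup() callback: place the controller in master mode and enable it.
 * The clock divider and polarity are programmed per operation in
 * mchp_coreqspi_setup_clock().
 */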
static int mchp_coreqspi_setup_op(struct spi_device *spi_dev)
{
	struct spi_controller *ctlr = spi_dev->master;
	struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
	u32 control = readl_relaxed(qspi->regs + REG_CONTROL);

	control |= (CONTROL_MASTER | CONTROL_ENABLE);
	control &= ~CONTROL_CLKIDLE;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	return 0;
}

static inline void mchp_coreqspi_config_op(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
{
	u32 idle_cycles = 0;
	int total_bytes, cmd_bytes, frames, ctrl;

	cmd_bytes = op->cmd.nbytes + op->addr.nbytes;
	total_bytes = cmd_bytes + op->data.nbytes;

	/*
	 * As per the coreQSPI IP spec, the number of command and data bytes is
	 * controlled by the frames register for each SPI sequence. This supports
	 * the SPI flash memory read and write sequences as below, so configure
	 * the cmd and total bytes accordingly.
	 * ---------------------------------------------------------------------
	 * TOTAL BYTES  |  CMD BYTES | What happens                             |
	 * ______________________________________________________________________
	 *              |            |                                          |
	 *     1        |   1        | The SPI core will transmit a single byte |
	 *              |            | and the received data is discarded       |
	 *              |            |                                          |
	 *     1        |   0        | The SPI core will transmit a single byte |
	 *              |            | and return a single byte                 |
	 *              |            |                                          |
	 *     10       |   4        | The SPI core will transmit 4 command     |
	 *              |            | bytes, discarding the received data, and |
	 *              |            | then transmit 6 dummy bytes, returning   |
	 *              |            | the 6 received bytes                     |
	 *              |            |                                          |
	 *     10       |   10       | The SPI core will transmit 10 command    |
	 *              |            | bytes and discard all received data      |
	 *              |            |                                          |
	 *     10       |    0       | The SPI core will transmit 10 bytes and  |
	 *              |            | return the 10 received bytes             |
	 * ______________________________________________________________________
	 */
	if (!(op->data.dir == SPI_MEM_DATA_IN))
		cmd_bytes = total_bytes;

	frames = total_bytes & BYTESUPPER_MASK;
	writel_relaxed(frames, qspi->regs + REG_FRAMESUP);
	frames = total_bytes & BYTESLOWER_MASK;
	frames |= cmd_bytes << FRAMES_CMDBYTES_SHIFT;

	if (op->dummy.buswidth)
		idle_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;

	frames |= idle_cycles << FRAMES_IDLE_SHIFT;
	ctrl = readl_relaxed(qspi->regs + REG_CONTROL);

	if (ctrl & CONTROL_MODE12_MASK)
		frames |= (1 << FRAMES_SHIFT);

	frames |= FRAMES_FLAGWORD;
	writel_relaxed(frames, qspi->regs + REG_FRAMES);
}

static int mchp_qspi_wait_for_ready(struct spi_mem *mem)
{
	struct mchp_coreqspi *qspi = spi_controller_get_devdata(mem->spi->master);
	u32 status;
	int ret;

	ret = readl_poll_timeout(qspi->regs + REG_STATUS, status,
				 (status & STATUS_READY), 0,
				 TIMEOUT_MS);
	if (ret) {
		dev_err(&mem->spi->dev,
			"Timeout waiting on QSPI ready.\n");
		return -ETIMEDOUT;
	}

	return ret;
}

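/*
 * Execute a spi-mem operation: wait for the controller to become ready,
 * program the clock, mode and frame registers, shift out the opcode and
 * address bytes, then run the data phase. TX data is written directly,
 * while RX data is drained from the interrupt handler until RX done
 * signals completion.
 */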
static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct mchp_coreqspi *qspi = spi_controller_get_devdata(mem->spi->master);
	u32 address = op->addr.val;
	u8 opcode = op->cmd.opcode;
	u8 opaddr[5];
	int err, i;

	mutex_lock(&qspi->op_lock);
	err = mchp_qspi_wait_for_ready(mem);
	if (err)
		goto error;

	err = mchp_coreqspi_setup_clock(qspi, mem->spi);
	if (err)
		goto error;

	err = mchp_coreqspi_set_mode(qspi, op);
	if (err)
		goto error;

	reinit_completion(&qspi->data_completion);
	mchp_coreqspi_config_op(qspi, op);
	if (op->cmd.opcode) {
		qspi->txbuf = &opcode;
		qspi->rxbuf = NULL;
		qspi->tx_len = op->cmd.nbytes;
		qspi->rx_len = 0;
		mchp_coreqspi_write_op(qspi, false);
	}

	qspi->txbuf = &opaddr[0];
	if (op->addr.nbytes) {
		for (i = 0; i < op->addr.nbytes; i++)
			qspi->txbuf[i] = address >> (8 * (op->addr.nbytes - i - 1));

		qspi->rxbuf = NULL;
		qspi->tx_len = op->addr.nbytes;
		qspi->rx_len = 0;
		mchp_coreqspi_write_op(qspi, false);
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_OUT) {
			qspi->txbuf = (u8 *)op->data.buf.out;
			qspi->rxbuf = NULL;
			qspi->rx_len = 0;
			qspi->tx_len = op->data.nbytes;
			mchp_coreqspi_write_op(qspi, true);
		} else {
			qspi->txbuf = NULL;
			qspi->rxbuf = (u8 *)op->data.buf.in;
			qspi->rx_len = op->data.nbytes;
			qspi->tx_len = 0;
		}
	}

	mchp_coreqspi_enable_ints(qspi);

	if (!wait_for_completion_timeout(&qspi->data_completion, msecs_to_jiffies(1000)))
		err = -ETIMEDOUT;

error:
	mutex_unlock(&qspi->op_lock);
	mchp_coreqspi_disable_ints(qspi);

	return err;
}

static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if ((op->data.buswidth == 4 || op->data.buswidth == 2) &&
	    (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))) {
		/*
		 * If the command and address are on DQ0 only, then this
		 * controller doesn't support sending data on dual and
		 * quad lines, but it does support reading data on dual and
		 * quad lines with the same configuration as command and
		 * address on DQ0, i.e. the EX_RO (read only) setting in
		 * control register bits [15:13] applies only when the
		 * command and address are on DQ0, and it is meant for
		 * reads, not writes.
		 * E.g. opcode 0x34 (Quad Load Program Data) is not
		 * supported. The spi-mem layer will then iterate over the
		 * operations and choose a supported one.
		 */
		if (op->data.dir == SPI_MEM_DATA_OUT)
			return false;
	}

	return true;
}

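/*
 * Clamp the data phase to the controller's 256 byte limit; the upper
 * layers then split larger transfers into multiple operations.
 */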
static int mchp_coreqspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	if (op->data.dir == SPI_MEM_DATA_OUT || op->data.dir == SPI_MEM_DATA_IN) {
		if (op->data.nbytes > MAX_DATA_CMD_LEN)
			op->data.nbytes = MAX_DATA_CMD_LEN;
	}

	return 0;
}

static const struct spi_controller_mem_ops mchp_coreqspi_mem_ops = {
	.adjust_op_size = mchp_coreqspi_adjust_op_size,
	.supports_op = mchp_coreqspi_supports_op,
	.exec_op = mchp_coreqspi_exec_op,
};

static int mchp_coreqspi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mchp_coreqspi *qspi;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	int ret;

	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*qspi));
	if (!ctlr)
		return dev_err_probe(&pdev->dev, -ENOMEM,
				     "unable to allocate master for QSPI controller\n");

	qspi = spi_controller_get_devdata(ctlr);
	platform_set_drvdata(pdev, qspi);

	qspi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qspi->regs))
		return dev_err_probe(&pdev->dev, PTR_ERR(qspi->regs),
				     "failed to map registers\n");

	qspi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(qspi->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(qspi->clk),
				     "could not get clock\n");

	ret = clk_prepare_enable(qspi->clk);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "failed to enable clock\n");

	init_completion(&qspi->data_completion);
	mutex_init(&qspi->op_lock);

	qspi->irq = platform_get_irq(pdev, 0);
	if (qspi->irq < 0) {
		ret = qspi->irq;
		goto out;
	}

	ret = devm_request_irq(&pdev->dev, qspi->irq, mchp_coreqspi_isr,
			       IRQF_SHARED, pdev->name, qspi);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed %d\n", ret);
		goto out;
	}

	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->mem_ops = &mchp_coreqspi_mem_ops;
	ctlr->setup = mchp_coreqspi_setup_op;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
			  SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->dev.of_node = np;

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret) {
		dev_err_probe(&pdev->dev, ret,
			      "spi_register_controller failed\n");
		goto out;
	}

	return 0;

out:
	clk_disable_unprepare(qspi->clk);

	return ret;
}

static void mchp_coreqspi_remove(struct platform_device *pdev)
{
	struct mchp_coreqspi *qspi = platform_get_drvdata(pdev);
	u32 control = readl_relaxed(qspi->regs + REG_CONTROL);

	mchp_coreqspi_disable_ints(qspi);
	control &= ~CONTROL_ENABLE;
	writel_relaxed(control, qspi->regs + REG_CONTROL);
	clk_disable_unprepare(qspi->clk);
}

static const struct of_device_id mchp_coreqspi_of_match[] = {
	{ .compatible = "microchip,coreqspi-rtl-v2" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mchp_coreqspi_of_match);

static struct platform_driver mchp_coreqspi_driver = {
	.probe = mchp_coreqspi_probe,
	.driver = {
		.name = "microchip,coreqspi",
		.of_match_table = mchp_coreqspi_of_match,
	},
	.remove_new = mchp_coreqspi_remove,
};
module_platform_driver(mchp_coreqspi_driver);

596 MODULE_AUTHOR("Naga Sureshkumar Relli <nagasuresh.relli@microchip.com");
597 MODULE_DESCRIPTION("Microchip coreQSPI QSPI controller driver");
598 MODULE_LICENSE("GPL");
599