// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Atmel AT32 and AT91 SPI Controllers
 *
 * Copyright (C) 2006 Atmel Corporation
 */

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <linux/io.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <trace/events/spi.h>

/* SPI register offsets */
#define SPI_CR					0x0000
#define SPI_MR					0x0004
#define SPI_RDR					0x0008
#define SPI_TDR					0x000c
#define SPI_SR					0x0010
#define SPI_IER					0x0014
#define SPI_IDR					0x0018
#define SPI_IMR					0x001c
#define SPI_CSR0				0x0030
#define SPI_CSR1				0x0034
#define SPI_CSR2				0x0038
#define SPI_CSR3				0x003c
#define SPI_FMR					0x0040
#define SPI_FLR					0x0044
#define SPI_VERSION				0x00fc
#define SPI_RPR					0x0100
#define SPI_RCR					0x0104
#define SPI_TPR					0x0108
#define SPI_TCR					0x010c
#define SPI_RNPR				0x0110
#define SPI_RNCR				0x0114
#define SPI_TNPR				0x0118
#define SPI_TNCR				0x011c
#define SPI_PTCR				0x0120
#define SPI_PTSR				0x0124

/* Bitfields in CR */
#define SPI_SPIEN_OFFSET			0
#define SPI_SPIEN_SIZE				1
#define SPI_SPIDIS_OFFSET			1
#define SPI_SPIDIS_SIZE				1
#define SPI_SWRST_OFFSET			7
#define SPI_SWRST_SIZE				1
#define SPI_LASTXFER_OFFSET			24
#define SPI_LASTXFER_SIZE			1
#define SPI_TXFCLR_OFFSET			16
#define SPI_TXFCLR_SIZE				1
#define SPI_RXFCLR_OFFSET			17
#define SPI_RXFCLR_SIZE				1
#define SPI_FIFOEN_OFFSET			30
#define SPI_FIFOEN_SIZE				1
#define SPI_FIFODIS_OFFSET			31
#define SPI_FIFODIS_SIZE			1

/* Bitfields in MR */
#define SPI_MSTR_OFFSET				0
#define SPI_MSTR_SIZE				1
#define SPI_PS_OFFSET				1
#define SPI_PS_SIZE				1
#define SPI_PCSDEC_OFFSET			2
#define SPI_PCSDEC_SIZE				1
#define SPI_FDIV_OFFSET				3
#define SPI_FDIV_SIZE				1
#define SPI_MODFDIS_OFFSET			4
#define SPI_MODFDIS_SIZE			1
#define SPI_WDRBT_OFFSET			5
#define SPI_WDRBT_SIZE				1
#define SPI_LLB_OFFSET				7
#define SPI_LLB_SIZE				1
#define SPI_PCS_OFFSET				16
#define SPI_PCS_SIZE				4
#define SPI_DLYBCS_OFFSET			24
#define SPI_DLYBCS_SIZE				8

/* Bitfields in RDR */
#define SPI_RD_OFFSET				0
#define SPI_RD_SIZE				16

/* Bitfields in TDR */
#define SPI_TD_OFFSET				0
#define SPI_TD_SIZE				16

/* Bitfields in SR */
#define SPI_RDRF_OFFSET				0
#define SPI_RDRF_SIZE				1
#define SPI_TDRE_OFFSET				1
#define SPI_TDRE_SIZE				1
#define SPI_MODF_OFFSET				2
#define SPI_MODF_SIZE				1
#define SPI_OVRES_OFFSET			3
#define SPI_OVRES_SIZE				1
#define SPI_ENDRX_OFFSET			4
#define SPI_ENDRX_SIZE				1
#define SPI_ENDTX_OFFSET			5
#define SPI_ENDTX_SIZE				1
#define SPI_RXBUFF_OFFSET			6
#define SPI_RXBUFF_SIZE				1
#define SPI_TXBUFE_OFFSET			7
#define SPI_TXBUFE_SIZE				1
#define SPI_NSSR_OFFSET				8
#define SPI_NSSR_SIZE				1
#define SPI_TXEMPTY_OFFSET			9
#define SPI_TXEMPTY_SIZE			1
#define SPI_SPIENS_OFFSET			16
#define SPI_SPIENS_SIZE				1
#define SPI_TXFEF_OFFSET			24
#define SPI_TXFEF_SIZE				1
#define SPI_TXFFF_OFFSET			25
#define SPI_TXFFF_SIZE				1
#define SPI_TXFTHF_OFFSET			26
#define SPI_TXFTHF_SIZE				1
#define SPI_RXFEF_OFFSET			27
#define SPI_RXFEF_SIZE				1
#define SPI_RXFFF_OFFSET			28
#define SPI_RXFFF_SIZE				1
#define SPI_RXFTHF_OFFSET			29
#define SPI_RXFTHF_SIZE				1
#define SPI_TXFPTEF_OFFSET			30
#define SPI_TXFPTEF_SIZE			1
#define SPI_RXFPTEF_OFFSET			31
#define SPI_RXFPTEF_SIZE			1

/* Bitfields in CSR0 */
#define SPI_CPOL_OFFSET				0
#define SPI_CPOL_SIZE				1
#define SPI_NCPHA_OFFSET			1
#define SPI_NCPHA_SIZE				1
#define SPI_CSAAT_OFFSET			3
#define SPI_CSAAT_SIZE				1
#define SPI_BITS_OFFSET				4
#define SPI_BITS_SIZE				4
#define SPI_SCBR_OFFSET				8
#define SPI_SCBR_SIZE				8
#define SPI_DLYBS_OFFSET			16
#define SPI_DLYBS_SIZE				8
#define SPI_DLYBCT_OFFSET			24
#define SPI_DLYBCT_SIZE				8

/* Bitfields in RCR */
#define SPI_RXCTR_OFFSET			0
#define SPI_RXCTR_SIZE				16

/* Bitfields in TCR */
#define SPI_TXCTR_OFFSET			0
#define SPI_TXCTR_SIZE				16

/* Bitfields in RNCR */
#define SPI_RXNCR_OFFSET			0
#define SPI_RXNCR_SIZE				16

/* Bitfields in TNCR */
#define SPI_TXNCR_OFFSET			0
#define SPI_TXNCR_SIZE				16

/* Bitfields in PTCR */
#define SPI_RXTEN_OFFSET			0
#define SPI_RXTEN_SIZE				1
#define SPI_RXTDIS_OFFSET			1
#define SPI_RXTDIS_SIZE				1
#define SPI_TXTEN_OFFSET			8
#define SPI_TXTEN_SIZE				1
#define SPI_TXTDIS_OFFSET			9
#define SPI_TXTDIS_SIZE				1

/* Bitfields in FMR */
#define SPI_TXRDYM_OFFSET			0
#define SPI_TXRDYM_SIZE				2
#define SPI_RXRDYM_OFFSET			4
#define SPI_RXRDYM_SIZE				2
#define SPI_TXFTHRES_OFFSET			16
#define SPI_TXFTHRES_SIZE			6
#define SPI_RXFTHRES_OFFSET			24
#define SPI_RXFTHRES_SIZE			6

/* Bitfields in FLR */
#define SPI_TXFL_OFFSET				0
#define SPI_TXFL_SIZE				6
#define SPI_RXFL_OFFSET				16
#define SPI_RXFL_SIZE				6

/* Constants for BITS */
#define SPI_BITS_8_BPT				0
#define SPI_BITS_9_BPT				1
#define SPI_BITS_10_BPT				2
#define SPI_BITS_11_BPT				3
#define SPI_BITS_12_BPT				4
#define SPI_BITS_13_BPT				5
#define SPI_BITS_14_BPT				6
#define SPI_BITS_15_BPT				7
#define SPI_BITS_16_BPT				8
#define SPI_ONE_DATA				0
#define SPI_TWO_DATA				1
#define SPI_FOUR_DATA				2

/* Bit manipulation macros */
#define SPI_BIT(name) \
	(1 << SPI_##name##_OFFSET)
#define SPI_BF(name, value) \
	(((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
#define SPI_BFEXT(name, value) \
	(((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
#define SPI_BFINS(name, value, old) \
	(((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
	  | SPI_BF(name, value))
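
/*
 * Worked example (for illustration only; values follow directly from the
 * field definitions above): with PCS at offset 16 and size 4,
 * SPI_BF(PCS, 0xe) expands to ((0xe & 0xf) << 16) == 0x000e0000,
 * SPI_BFEXT(PCS, 0x000e0000) recovers 0xe, and SPI_BFINS(SCBR, 5, csr)
 * clears bits 15:8 of csr before OR-ing in (5 << 8).
 */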

/* Register access macros */
#define spi_readl(port, reg) \
	readl_relaxed((port)->regs + SPI_##reg)
#define spi_writel(port, reg, value) \
	writel_relaxed((value), (port)->regs + SPI_##reg)
#define spi_writew(port, reg, value) \
	writew_relaxed((value), (port)->regs + SPI_##reg)

/* Use PIO for small transfers, avoiding DMA setup/teardown overhead and
 * cache operations; better heuristics would consider word size and bitrate.
 */
#define DMA_MIN_BYTES	16

#define SPI_DMA_TIMEOUT		(msecs_to_jiffies(1000))

#define AUTOSUSPEND_TIMEOUT	2000

struct atmel_spi_caps {
	bool	is_spi2;
	bool	has_wdrbt;
	bool	has_dma_support;
	bool	has_pdc_support;
};

/*
 * The core SPI transfer engine just talks to a register bank to set up
 * DMA transfers; transfer queue progress is driven by IRQs.  The clock
 * framework provides the base clock, subdivided for each spi_device.
 */
struct atmel_spi {
	spinlock_t		lock;
	unsigned long		flags;

	phys_addr_t		phybase;
	void __iomem		*regs;
	int			irq;
	struct clk		*clk;
	struct platform_device	*pdev;
	unsigned long		spi_clk;

	struct spi_transfer	*current_transfer;
	int			current_remaining_bytes;
	int			done_status;
	dma_addr_t		dma_addr_rx_bbuf;
	dma_addr_t		dma_addr_tx_bbuf;
	void			*addr_rx_bbuf;
	void			*addr_tx_bbuf;

	struct completion	xfer_completion;

	struct atmel_spi_caps	caps;

	bool			use_dma;
	bool			use_pdc;

	bool			keep_cs;

	u32			fifo_size;
	u8			native_cs_free;
	u8			native_cs_for_gpio;
};

/* Controller-specific per-slave state */
struct atmel_spi_device {
	u32			csr;
};

#define SPI_MAX_DMA_XFER	65535 /* true for both PDC and DMA */
#define INVALID_DMA_ADDRESS	0xffffffff

/*
 * Version 2 of the SPI controller has
 *  - CR.LASTXFER
 *  - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
 *  - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
 *  - SPI_CSRx.CSAAT
 *  - SPI_CSRx.SCBR allows faster clocking
 */
static bool atmel_spi_is_v2(struct atmel_spi *as)
{
	return as->caps.is_spi2;
}

/*
 * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
 * they assume that spi slave device state will not change on deselect, so
 * that automagic deselection is OK.  ("NPCSx rises if no data is to be
 * transmitted")  Not so!  Workaround uses nCSx pins as GPIOs; or newer
 * controllers have CSAAT and friends.
 *
 * Even on controllers newer than at91rm9200, using GPIOs can make sense
 * as it lets us support active-high chipselects despite the controller's
 * belief that only active-low devices/systems exist.
 *
 * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
 * right when driven with GPIO.  ("Mode Fault does not allow more than one
 * Master on Chip Select 0.")  No workaround exists for that ... so for
 * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
 * and (c) will trigger that first erratum in some cases.
 */

static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	int chip_select;
	u32 mr;

	if (spi->cs_gpiod)
		chip_select = as->native_cs_for_gpio;
	else
		chip_select = spi->chip_select;

	if (atmel_spi_is_v2(as)) {
		spi_writel(as, CSR0 + 4 * chip_select, asd->csr);
		/* On lower SPI versions, there is an issue where PDC
		 * transfers on CS1,2,3 need SPI_CSR0.BITS configured the
		 * same as SPI_CSR1,2,3.BITS
		 */
		spi_writel(as, CSR0, asd->csr);
		if (as->caps.has_wdrbt) {
			spi_writel(as, MR,
					SPI_BF(PCS, ~(0x01 << chip_select))
					| SPI_BIT(WDRBT)
					| SPI_BIT(MODFDIS)
					| SPI_BIT(MSTR));
		} else {
			spi_writel(as, MR,
					SPI_BF(PCS, ~(0x01 << chip_select))
					| SPI_BIT(MODFDIS)
					| SPI_BIT(MSTR));
		}

		mr = spi_readl(as, MR);
		if (spi->cs_gpiod)
			gpiod_set_value(spi->cs_gpiod, 1);
	} else {
		u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
		int i;
		u32 csr;

		/* Make sure clock polarity is correct */
		for (i = 0; i < spi->master->num_chipselect; i++) {
			csr = spi_readl(as, CSR0 + 4 * i);
			if ((csr ^ cpol) & SPI_BIT(CPOL))
				spi_writel(as, CSR0 + 4 * i,
						csr ^ SPI_BIT(CPOL));
		}

		mr = spi_readl(as, MR);
		mr = SPI_BFINS(PCS, ~(1 << chip_select), mr);
		if (spi->cs_gpiod)
			gpiod_set_value(spi->cs_gpiod, 1);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "activate NPCS, mr %08x\n", mr);
}

static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
	int chip_select;
	u32 mr;

	if (spi->cs_gpiod)
		chip_select = as->native_cs_for_gpio;
	else
		chip_select = spi->chip_select;

	/* only deactivate *this* device; sometimes transfers to
	 * another device may be active when this routine is called.
	 */
	mr = spi_readl(as, MR);
	if (~SPI_BFEXT(PCS, mr) & (1 << chip_select)) {
		mr = SPI_BFINS(PCS, 0xf, mr);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "DEactivate NPCS, mr %08x\n", mr);

	if (!spi->cs_gpiod)
		spi_writel(as, CR, SPI_BIT(LASTXFER));
	else
		gpiod_set_value(spi->cs_gpiod, 0);
}

static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
{
	spin_lock_irqsave(&as->lock, as->flags);
}

static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
{
	spin_unlock_irqrestore(&as->lock, as->flags);
}

static inline bool atmel_spi_is_vmalloc_xfer(struct spi_transfer *xfer)
{
	return is_vmalloc_addr(xfer->tx_buf) || is_vmalloc_addr(xfer->rx_buf);
}

static inline bool atmel_spi_use_dma(struct atmel_spi *as,
				struct spi_transfer *xfer)
{
	return as->use_dma && xfer->len >= DMA_MIN_BYTES;
}

static bool atmel_spi_can_dma(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct atmel_spi *as = spi_master_get_devdata(master);

	if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5))
		return atmel_spi_use_dma(as, xfer) &&
			!atmel_spi_is_vmalloc_xfer(xfer);
	else
		return atmel_spi_use_dma(as, xfer);
}

static int atmel_spi_dma_slave_config(struct atmel_spi *as,
				struct dma_slave_config *slave_config,
				u8 bits_per_word)
{
	struct spi_master *master = platform_get_drvdata(as->pdev);
	int err = 0;

	if (bits_per_word > 8) {
		slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	} else {
		slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	}

	slave_config->dst_addr = (dma_addr_t)as->phybase + SPI_TDR;
	slave_config->src_addr = (dma_addr_t)as->phybase + SPI_RDR;
	slave_config->src_maxburst = 1;
	slave_config->dst_maxburst = 1;
	slave_config->device_fc = false;

	/*
	 * This driver uses fixed peripheral select mode (PS bit set to '0' in
	 * the Mode Register).
	 * So according to the datasheet, when FIFOs are available (and
	 * enabled), the Transmit FIFO operates in Multiple Data Mode.
	 * In this mode, up to 2 data, not 4, can be written into the Transmit
	 * Data Register in a single access.
	 * However, the first data has to be written into the lowest 16 bits and
	 * the second data into the highest 16 bits of the Transmit
	 * Data Register. For 8bit data (the most frequent case), it would
	 * require to rework tx_buf so each data would actually fit 16 bits.
	 * So we'd rather write only one data at a time. Hence the transmit
	 * path works the same whether FIFOs are available (and enabled) or not.
	 */
	slave_config->direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(master->dma_tx, slave_config)) {
		dev_err(&as->pdev->dev,
			"failed to configure tx dma channel\n");
		err = -EINVAL;
	}

	/*
	 * This driver configures the spi controller for master mode (MSTR bit
	 * set to '1' in the Mode Register).
	 * So according to the datasheet, when FIFOs are available (and
	 * enabled), the Receive FIFO operates in Single Data Mode.
	 * So the receive path works the same whether FIFOs are available (and
	 * enabled) or not.
	 */
	slave_config->direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(master->dma_rx, slave_config)) {
		dev_err(&as->pdev->dev,
			"failed to configure rx dma channel\n");
		err = -EINVAL;
	}

	return err;
}

static int atmel_spi_configure_dma(struct spi_master *master,
				   struct atmel_spi *as)
{
	struct dma_slave_config	slave_config;
	struct device *dev = &as->pdev->dev;
	int err;

	dma_cap_mask_t mask;
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	master->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		err = dev_err_probe(dev, PTR_ERR(master->dma_tx),
				    "No TX DMA channel, DMA is disabled\n");
		goto error_clear;
	}

	master->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(master->dma_rx)) {
		err = PTR_ERR(master->dma_rx);
		/*
		 * No reason to check EPROBE_DEFER here since we have already
		 * requested tx channel.
		 */
		dev_err(dev, "No RX DMA channel, DMA is disabled\n");
		goto error;
	}

	err = atmel_spi_dma_slave_config(as, &slave_config, 8);
	if (err)
		goto error;

	dev_info(&as->pdev->dev,
			"Using %s (tx) and %s (rx) for DMA transfers\n",
			dma_chan_name(master->dma_tx),
			dma_chan_name(master->dma_rx));

	return 0;
error:
	if (!IS_ERR(master->dma_rx))
		dma_release_channel(master->dma_rx);
	if (!IS_ERR(master->dma_tx))
		dma_release_channel(master->dma_tx);
error_clear:
	master->dma_tx = master->dma_rx = NULL;
	return err;
}

static void atmel_spi_stop_dma(struct spi_master *master)
{
	if (master->dma_rx)
		dmaengine_terminate_all(master->dma_rx);
	if (master->dma_tx)
		dmaengine_terminate_all(master->dma_tx);
}

static void atmel_spi_release_dma(struct spi_master *master)
{
	if (master->dma_rx) {
		dma_release_channel(master->dma_rx);
		master->dma_rx = NULL;
	}
	if (master->dma_tx) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
	}
}

/* This function is called by the DMA driver from tasklet context */
static void dma_callback(void *data)
{
	struct spi_master	*master = data;
	struct atmel_spi	*as = spi_master_get_devdata(master);

	if (is_vmalloc_addr(as->current_transfer->rx_buf) &&
	    IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
		memcpy(as->current_transfer->rx_buf, as->addr_rx_bbuf,
		       as->current_transfer->len);
	}
	complete(&as->xfer_completion);
}

/*
 * Next transfer using PIO without FIFO.
 */
static void atmel_spi_next_xfer_single(struct spi_master *master,
				       struct spi_transfer *xfer)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;

	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");

	/* Make sure data is not remaining in RDR */
	spi_readl(as, RDR);
	while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
		spi_readl(as, RDR);
		cpu_relax();
	}

	if (xfer->bits_per_word > 8)
		spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
	else
		spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));

	dev_dbg(master->dev.parent,
		"  start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
		xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
		xfer->bits_per_word);

	/* Enable relevant interrupts */
	spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
}

/*
 * Next transfer using PIO with FIFO.
 */
static void atmel_spi_next_xfer_fifo(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	struct atmel_spi *as = spi_master_get_devdata(master);
	u32 current_remaining_data, num_data;
	u32 offset = xfer->len - as->current_remaining_bytes;
	const u16 *words = (const u16 *)((u8 *)xfer->tx_buf + offset);
	const u8  *bytes = (const u8  *)((u8 *)xfer->tx_buf + offset);
	u16 td0, td1;
	u32 fifomr;

	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_fifo\n");

	/* Compute the number of data to transfer in the current iteration */
	current_remaining_data = ((xfer->bits_per_word > 8) ?
				  ((u32)as->current_remaining_bytes >> 1) :
				  (u32)as->current_remaining_bytes);
	num_data = min(current_remaining_data, as->fifo_size);

	/* Flush RX and TX FIFOs */
	spi_writel(as, CR, SPI_BIT(RXFCLR) | SPI_BIT(TXFCLR));
	while (spi_readl(as, FLR))
		cpu_relax();

	/* Set RX FIFO Threshold to the number of data to transfer */
	fifomr = spi_readl(as, FMR);
	spi_writel(as, FMR, SPI_BFINS(RXFTHRES, num_data, fifomr));

	/* Clear FIFO flags in the Status Register, especially RXFTHF */
	(void)spi_readl(as, SR);

	/* Fill TX FIFO */
	while (num_data >= 2) {
		if (xfer->bits_per_word > 8) {
			td0 = *words++;
			td1 = *words++;
		} else {
			td0 = *bytes++;
			td1 = *bytes++;
		}

		spi_writel(as, TDR, (td1 << 16) | td0);
		num_data -= 2;
	}

	if (num_data) {
		if (xfer->bits_per_word > 8)
			td0 = *words++;
		else
			td0 = *bytes++;

		spi_writew(as, TDR, td0);
		num_data--;
	}

	dev_dbg(master->dev.parent,
		"  start fifo xfer %p: len %u tx %p rx %p bitpw %d\n",
		xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
		xfer->bits_per_word);

	/*
	 * Enable RX FIFO Threshold Flag interrupt to be notified about
	 * transfer completion.
	 */
	spi_writel(as, IER, SPI_BIT(RXFTHF) | SPI_BIT(OVRES));
}

/*
 * Next transfer using PIO.
 */
static void atmel_spi_next_xfer_pio(struct spi_master *master,
				    struct spi_transfer *xfer)
{
	struct atmel_spi *as = spi_master_get_devdata(master);

	if (as->fifo_size)
		atmel_spi_next_xfer_fifo(master, xfer);
	else
		atmel_spi_next_xfer_single(master, xfer);
}

/*
 * Submit next transfer for DMA.
 */
static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
				struct spi_transfer *xfer,
				u32 *plen)
	__must_hold(&as->lock)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct dma_chan		*rxchan = master->dma_rx;
	struct dma_chan		*txchan = master->dma_tx;
	struct dma_async_tx_descriptor *rxdesc;
	struct dma_async_tx_descriptor *txdesc;
	struct dma_slave_config	slave_config;
	dma_cookie_t		cookie;

	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");

	/* Check that the channels are available */
	if (!rxchan || !txchan)
		return -ENODEV;

	/* release lock for DMA operations */
	atmel_spi_unlock(as);

	*plen = xfer->len;

	if (atmel_spi_dma_slave_config(as, &slave_config,
				       xfer->bits_per_word))
		goto err_exit;

	/* Send both scatterlists */
	if (atmel_spi_is_vmalloc_xfer(xfer) &&
	    IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
		rxdesc = dmaengine_prep_slave_single(rxchan,
						     as->dma_addr_rx_bbuf,
						     xfer->len,
						     DMA_DEV_TO_MEM,
						     DMA_PREP_INTERRUPT |
						     DMA_CTRL_ACK);
	} else {
		rxdesc = dmaengine_prep_slave_sg(rxchan,
						 xfer->rx_sg.sgl,
						 xfer->rx_sg.nents,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT |
						 DMA_CTRL_ACK);
	}
	if (!rxdesc)
		goto err_dma;

	if (atmel_spi_is_vmalloc_xfer(xfer) &&
	    IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
		memcpy(as->addr_tx_bbuf, xfer->tx_buf, xfer->len);
		txdesc = dmaengine_prep_slave_single(txchan,
						     as->dma_addr_tx_bbuf,
						     xfer->len, DMA_MEM_TO_DEV,
						     DMA_PREP_INTERRUPT |
						     DMA_CTRL_ACK);
	} else {
		txdesc = dmaengine_prep_slave_sg(txchan,
						 xfer->tx_sg.sgl,
						 xfer->tx_sg.nents,
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT |
						 DMA_CTRL_ACK);
	}
	if (!txdesc)
		goto err_dma;

	dev_dbg(master->dev.parent,
		"  start dma xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
		xfer, xfer->len, xfer->tx_buf, (unsigned long long)xfer->tx_dma,
		xfer->rx_buf, (unsigned long long)xfer->rx_dma);

	/* Enable relevant interrupts */
	spi_writel(as, IER, SPI_BIT(OVRES));

	/* Put the callback on the RX transfer only, that should finish last */
	rxdesc->callback = dma_callback;
	rxdesc->callback_param = master;

	/* Submit and fire RX and TX with TX last so we're ready to read! */
	cookie = rxdesc->tx_submit(rxdesc);
	if (dma_submit_error(cookie))
		goto err_dma;
	cookie = txdesc->tx_submit(txdesc);
	if (dma_submit_error(cookie))
		goto err_dma;
	rxchan->device->device_issue_pending(rxchan);
	txchan->device->device_issue_pending(txchan);

	/* take back lock */
	atmel_spi_lock(as);
	return 0;

err_dma:
	spi_writel(as, IDR, SPI_BIT(OVRES));
	atmel_spi_stop_dma(master);
err_exit:
	atmel_spi_lock(as);
	return -ENOMEM;
}

static void atmel_spi_next_xfer_data(struct spi_master *master,
				struct spi_transfer *xfer,
				dma_addr_t *tx_dma,
				dma_addr_t *rx_dma,
				u32 *plen)
{
	*rx_dma = xfer->rx_dma + xfer->len - *plen;
	*tx_dma = xfer->tx_dma + xfer->len - *plen;
	if (*plen > master->max_dma_len)
		*plen = master->max_dma_len;
}

static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	u32			scbr, csr;
	unsigned long		bus_hz;
	int chip_select;

	if (spi->cs_gpiod)
		chip_select = as->native_cs_for_gpio;
	else
		chip_select = spi->chip_select;

	/* v1 chips start out at half the peripheral bus speed. */
	bus_hz = as->spi_clk;
	if (!atmel_spi_is_v2(as))
		bus_hz /= 2;

	/*
	 * Calculate the lowest divider that satisfies the
	 * constraint, assuming div32/fdiv/mbz == 0.
	 */
	scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);

	/*
	 * If the resulting divider doesn't fit into the
	 * register bitfield, we can't satisfy the constraint.
	 */
	if (scbr >= (1 << SPI_SCBR_SIZE)) {
		dev_err(&spi->dev,
			"setup: %d Hz too slow, scbr %u; min %ld Hz\n",
			xfer->speed_hz, scbr, bus_hz/255);
		return -EINVAL;
	}
	if (scbr == 0) {
		dev_err(&spi->dev,
			"setup: %d Hz too high, scbr %u; max %ld Hz\n",
			xfer->speed_hz, scbr, bus_hz);
		return -EINVAL;
	}
	csr = spi_readl(as, CSR0 + 4 * chip_select);
	csr = SPI_BFINS(SCBR, scbr, csr);
	spi_writel(as, CSR0 + 4 * chip_select, csr);
	xfer->effective_speed_hz = bus_hz / scbr;

	return 0;
}
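
/*
 * Example (illustrative numbers, not from a datasheet): with a v2
 * controller and bus_hz == 66 MHz, a requested 10 MHz transfer yields
 * scbr == DIV_ROUND_UP(66000000, 10000000) == 7, so the effective rate
 * is 66 MHz / 7, roughly 9.43 MHz, never above the requested speed.
 */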

/*
 * Submit next transfer for PDC.
 * lock is held, spi irq is blocked
 */
static void atmel_spi_pdc_next_xfer(struct spi_master *master,
					struct spi_message *msg,
					struct spi_transfer *xfer)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			len;
	dma_addr_t		tx_dma, rx_dma;

	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));

	len = as->current_remaining_bytes;
	atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
	as->current_remaining_bytes -= len;

	spi_writel(as, RPR, rx_dma);
	spi_writel(as, TPR, tx_dma);

	if (msg->spi->bits_per_word > 8)
		len >>= 1;
	spi_writel(as, RCR, len);
	spi_writel(as, TCR, len);

	dev_dbg(&msg->spi->dev,
		"  start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
		xfer, xfer->len, xfer->tx_buf,
		(unsigned long long)xfer->tx_dma, xfer->rx_buf,
		(unsigned long long)xfer->rx_dma);

	if (as->current_remaining_bytes) {
		len = as->current_remaining_bytes;
		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
		as->current_remaining_bytes -= len;

		spi_writel(as, RNPR, rx_dma);
		spi_writel(as, TNPR, tx_dma);

		if (msg->spi->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RNCR, len);
		spi_writel(as, TNCR, len);

		dev_dbg(&msg->spi->dev,
			"  next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
			xfer, xfer->len, xfer->tx_buf,
			(unsigned long long)xfer->tx_dma, xfer->rx_buf,
			(unsigned long long)xfer->rx_dma);
	}

	/* REVISIT: We're waiting for RXBUFF before we start the next
	 * transfer because we need to handle some difficult timing
	 * issues otherwise. If we wait for TXBUFE in one transfer and
	 * then start waiting for RXBUFF in the next, it's difficult
	 * to tell the difference between the RXBUFF interrupt we're
	 * actually waiting for and the RXBUFF interrupt of the
	 * previous transfer.
	 *
	 * It should be doable, though. Just not now...
	 */
	spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}

/*
 * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
 *  - The buffer is either valid for CPU access, else NULL
 *  - If the buffer is valid, so is its DMA address
 *
 * This driver manages the dma address unless message->is_dma_mapped.
 */
static int
atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
{
	struct device	*dev = &as->pdev->dev;

	xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
	if (xfer->tx_buf) {
		/* tx_buf is a const void* where we need a void * for the dma
		 * mapping */
		void *nonconst_tx = (void *)xfer->tx_buf;

		xfer->tx_dma = dma_map_single(dev,
				nonconst_tx, xfer->len,
				DMA_TO_DEVICE);
		if (dma_mapping_error(dev, xfer->tx_dma))
			return -ENOMEM;
	}
	if (xfer->rx_buf) {
		xfer->rx_dma = dma_map_single(dev,
				xfer->rx_buf, xfer->len,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, xfer->rx_dma)) {
			if (xfer->tx_buf)
				dma_unmap_single(dev,
						xfer->tx_dma, xfer->len,
						DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}
	return 0;
}

static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	if (xfer->tx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(master->dev.parent, xfer->tx_dma,
				 xfer->len, DMA_TO_DEVICE);
	if (xfer->rx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(master->dev.parent, xfer->rx_dma,
				 xfer->len, DMA_FROM_DEVICE);
}

static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
{
	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
}

static void
atmel_spi_pump_single_data(struct atmel_spi *as, struct spi_transfer *xfer)
{
	u8		*rxp;
	u16		*rxp16;
	unsigned long	xfer_pos = xfer->len - as->current_remaining_bytes;

	if (xfer->bits_per_word > 8) {
		rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
		*rxp16 = spi_readl(as, RDR);
	} else {
		rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
		*rxp = spi_readl(as, RDR);
	}
	if (xfer->bits_per_word > 8) {
		if (as->current_remaining_bytes > 2)
			as->current_remaining_bytes -= 2;
		else
			as->current_remaining_bytes = 0;
	} else {
		as->current_remaining_bytes--;
	}
}

static void
atmel_spi_pump_fifo_data(struct atmel_spi *as, struct spi_transfer *xfer)
{
	u32 fifolr = spi_readl(as, FLR);
	u32 num_bytes, num_data = SPI_BFEXT(RXFL, fifolr);
	u32 offset = xfer->len - as->current_remaining_bytes;
	u16 *words = (u16 *)((u8 *)xfer->rx_buf + offset);
	u8  *bytes = (u8  *)((u8 *)xfer->rx_buf + offset);
	u16 rd; /* RD field is the lowest 16 bits of RDR */

	/* Update the number of remaining bytes to transfer */
	num_bytes = ((xfer->bits_per_word > 8) ?
		     (num_data << 1) :
		     num_data);

	if (as->current_remaining_bytes > num_bytes)
		as->current_remaining_bytes -= num_bytes;
	else
		as->current_remaining_bytes = 0;

	/* Handle odd numbers of bytes when data is wider than 8 bits */
	if (xfer->bits_per_word > 8)
		as->current_remaining_bytes &= ~0x1;

	/* Read data */
	while (num_data) {
		rd = spi_readl(as, RDR);
		if (xfer->bits_per_word > 8)
			*words++ = rd;
		else
			*bytes++ = rd;
		num_data--;
	}
}

/* Called from IRQ
 *
 * Must update "current_remaining_bytes" to keep track of data
 * to transfer.
 */
static void
atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
{
	if (as->fifo_size)
		atmel_spi_pump_fifo_data(as, xfer);
	else
		atmel_spi_pump_single_data(as, xfer);
}

/* Interrupt
 *
 * No need for locking in this Interrupt handler: done_status is the
 * only information modified.
 */
static irqreturn_t
atmel_spi_pio_interrupt(int irq, void *dev_id)
{
	struct spi_master	*master = dev_id;
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			status, pending, imr;
	struct spi_transfer	*xfer;
	int			ret = IRQ_NONE;

	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {
		ret = IRQ_HANDLED;
		spi_writel(as, IDR, SPI_BIT(OVRES));
		dev_warn(master->dev.parent, "overrun\n");

		/*
		 * When we get an overrun, we disregard the current
		 * transfer. Data will not be copied back from any
		 * bounce buffer and msg->actual_len will not be
		 * updated with the last xfer.
		 *
		 * We will also not process any remaining transfers in
		 * the message.
		 */
		as->done_status = -EIO;
		smp_wmb();

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		complete(&as->xfer_completion);

	} else if (pending & (SPI_BIT(RDRF) | SPI_BIT(RXFTHF))) {
		atmel_spi_lock(as);

		if (as->current_remaining_bytes) {
			ret = IRQ_HANDLED;
			xfer = as->current_transfer;
			atmel_spi_pump_pio_data(as, xfer);
			if (!as->current_remaining_bytes)
				spi_writel(as, IDR, pending);

			complete(&as->xfer_completion);
		}

		atmel_spi_unlock(as);
	} else {
		WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending);
		ret = IRQ_HANDLED;
		spi_writel(as, IDR, pending);
	}

	return ret;
}

static irqreturn_t
atmel_spi_pdc_interrupt(int irq, void *dev_id)
{
	struct spi_master	*master = dev_id;
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {

		ret = IRQ_HANDLED;

		spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
				     | SPI_BIT(OVRES)));

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		as->done_status = -EIO;

		complete(&as->xfer_completion);

	} else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
		ret = IRQ_HANDLED;

		spi_writel(as, IDR, pending);

		complete(&as->xfer_completion);
	}

	return ret;
}

static int atmel_word_delay_csr(struct spi_device *spi, struct atmel_spi *as)
{
	struct spi_delay *delay = &spi->word_delay;
	u32 value = delay->value;

	switch (delay->unit) {
	case SPI_DELAY_UNIT_NSECS:
		value /= 1000;
		break;
	case SPI_DELAY_UNIT_USECS:
		break;
	default:
		return -EINVAL;
	}

	return (as->spi_clk / 1000000 * value) >> 5;
}
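
/*
 * Example (illustrative, assuming the DLYBCT delay is 32 * DLYBCT clock
 * periods as the >> 5 above implies): with spi_clk == 132 MHz and a word
 * delay of 1 usec, this returns (132 * 1) >> 5 == 4, and DLYBCT == 4
 * gives 32 * 4 / 132 MHz, close to 0.97 usec between consecutive words.
 */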

static void initialize_native_cs_for_gpio(struct atmel_spi *as)
{
	int i;
	struct spi_master *master = platform_get_drvdata(as->pdev);

	if (as->native_cs_free)
		return; /* already initialized */

	if (!master->cs_gpiods)
		return; /* No CS GPIO */

	/*
	 * On the first version of the controller (AT91RM9200), CS0
	 * can't be used in association with a GPIO
	 */
	if (atmel_spi_is_v2(as))
		i = 0;
	else
		i = 1;

	for (; i < 4; i++)
		if (master->cs_gpiods[i])
			as->native_cs_free |= BIT(i);

	/* ffs() is 1-based; convert to a 0-based CSRx index */
	if (as->native_cs_free)
		as->native_cs_for_gpio = ffs(as->native_cs_free) - 1;
}
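
/*
 * Example (illustrative): if CS1 and CS2 are GPIO-backed while CS0 is a
 * native chip select, the loop above yields native_cs_free == 0b0110, so
 * ffs() - 1 picks CS1, whose CSR is then borrowed for all GPIO devices.
 */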

static int atmel_spi_setup(struct spi_device *spi)
{
	struct atmel_spi	*as;
	struct atmel_spi_device	*asd;
	u32			csr;
	unsigned int		bits = spi->bits_per_word;
	int chip_select;
	int			word_delay_csr;

	as = spi_master_get_devdata(spi->master);

	/* see notes above re chipselect */
	if (!spi->cs_gpiod && (spi->mode & SPI_CS_HIGH)) {
		dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
		return -EINVAL;
	}

	/* Setup() is called during spi_register_controller(aka
	 * spi_register_master) but after all members of the cs_gpiod
	 * array have been filled, so we can look for which native
	 * CS will be free for use with a GPIO
	 */
	initialize_native_cs_for_gpio(as);

	if (spi->cs_gpiod && !as->native_cs_free) {
		dev_err(&spi->dev,
			"No native CS available to support this GPIO CS\n");
		return -EBUSY;
	}

	if (spi->cs_gpiod)
		chip_select = as->native_cs_for_gpio;
	else
		chip_select = spi->chip_select;

	csr = SPI_BF(BITS, bits - 8);
	if (spi->mode & SPI_CPOL)
		csr |= SPI_BIT(CPOL);
	if (!(spi->mode & SPI_CPHA))
		csr |= SPI_BIT(NCPHA);

	if (!spi->cs_gpiod)
		csr |= SPI_BIT(CSAAT);
	csr |= SPI_BF(DLYBS, 0);

	word_delay_csr = atmel_word_delay_csr(spi, as);
	if (word_delay_csr < 0)
		return word_delay_csr;

	/* DLYBCT adds delays between words.  This is useful for slow devices
	 * that need a bit of time to setup the next transfer.
	 */
	csr |= SPI_BF(DLYBCT, word_delay_csr);

	asd = spi->controller_state;
	if (!asd) {
		asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
		if (!asd)
			return -ENOMEM;

		spi->controller_state = asd;
	}

	asd->csr = csr;

	dev_dbg(&spi->dev,
		"setup: bpw %u mode 0x%x -> csr%d %08x\n",
		bits, spi->mode, spi->chip_select, csr);

	if (!atmel_spi_is_v2(as))
		spi_writel(as, CSR0 + 4 * chip_select, csr);

	return 0;
}

static int atmel_spi_one_transfer(struct spi_master *master,
					struct spi_message *msg,
					struct spi_transfer *xfer)
{
	struct atmel_spi	*as;
	struct spi_device	*spi = msg->spi;
	u8			bits;
	u32			len;
	struct atmel_spi_device	*asd;
	int			timeout;
	int			ret;
	unsigned long		dma_timeout;

	as = spi_master_get_devdata(master);

	if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
		dev_dbg(&spi->dev, "missing rx or tx buf\n");
		return -EINVAL;
	}

	asd = spi->controller_state;
	bits = (asd->csr >> 4) & 0xf;
	if (bits != xfer->bits_per_word - 8) {
		dev_dbg(&spi->dev,
			"you can't yet change bits_per_word in transfers\n");
		return -ENOPROTOOPT;
	}

	/*
	 * DMA map early, for performance (empties dcache ASAP) and
	 * better fault reporting.
	 */
	if ((!msg->is_dma_mapped)
		&& as->use_pdc) {
		if (atmel_spi_dma_map_xfer(as, xfer) < 0)
			return -ENOMEM;
	}

	atmel_spi_set_xfer_speed(as, msg->spi, xfer);

	as->done_status = 0;
	as->current_transfer = xfer;
	as->current_remaining_bytes = xfer->len;
	while (as->current_remaining_bytes) {
		reinit_completion(&as->xfer_completion);

		if (as->use_pdc) {
			atmel_spi_pdc_next_xfer(master, msg, xfer);
		} else if (atmel_spi_use_dma(as, xfer)) {
			len = as->current_remaining_bytes;
			ret = atmel_spi_next_xfer_dma_submit(master,
								xfer, &len);
			if (ret) {
				dev_err(&spi->dev,
					"unable to use DMA, fallback to PIO\n");
				atmel_spi_next_xfer_pio(master, xfer);
			} else {
				as->current_remaining_bytes -= len;
				if (as->current_remaining_bytes < 0)
					as->current_remaining_bytes = 0;
			}
		} else {
			atmel_spi_next_xfer_pio(master, xfer);
		}

		/* interrupts are disabled, so free the lock for schedule */
		atmel_spi_unlock(as);
		dma_timeout = wait_for_completion_timeout(&as->xfer_completion,
							  SPI_DMA_TIMEOUT);
		atmel_spi_lock(as);
		if (WARN_ON(dma_timeout == 0)) {
			dev_err(&spi->dev, "spi transfer timeout\n");
			as->done_status = -EIO;
		}

		if (as->done_status)
			break;
	}

	if (as->done_status) {
		if (as->use_pdc) {
			dev_warn(master->dev.parent,
				"overrun (%u/%u remaining)\n",
				spi_readl(as, TCR), spi_readl(as, RCR));

			/*
			 * Clean up DMA registers and make sure the data
			 * registers are empty.
			 */
			spi_writel(as, RNCR, 0);
			spi_writel(as, TNCR, 0);
			spi_writel(as, RCR, 0);
			spi_writel(as, TCR, 0);
			for (timeout = 1000; timeout; timeout--)
				if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
					break;
			if (!timeout)
				dev_warn(master->dev.parent,
					 "timeout waiting for TXEMPTY");
			while (spi_readl(as, SR) & SPI_BIT(RDRF))
				spi_readl(as, RDR);

			/* Clear any overrun happening while cleaning up */
			spi_readl(as, SR);

		} else if (atmel_spi_use_dma(as, xfer)) {
			atmel_spi_stop_dma(master);
		}

		if (!msg->is_dma_mapped
			&& as->use_pdc)
			atmel_spi_dma_unmap_xfer(master, xfer);

		return 0;

	} else {
		/* only update length if no error */
		msg->actual_length += xfer->len;
	}

	if (!msg->is_dma_mapped
		&& as->use_pdc)
		atmel_spi_dma_unmap_xfer(master, xfer);

	spi_transfer_delay_exec(xfer);

	if (xfer->cs_change) {
		if (list_is_last(&xfer->transfer_list,
				 &msg->transfers)) {
			as->keep_cs = true;
		} else {
			cs_deactivate(as, msg->spi);
			udelay(10);
			cs_activate(as, msg->spi);
		}
	}

	return 0;
}

static int atmel_spi_transfer_one_message(struct spi_master *master,
						struct spi_message *msg)
{
	struct atmel_spi *as;
	struct spi_transfer *xfer;
	struct spi_device *spi = msg->spi;
	int ret = 0;

	as = spi_master_get_devdata(master);

	dev_dbg(&spi->dev, "new message %p submitted for %s\n",
					msg, dev_name(&spi->dev));

	atmel_spi_lock(as);
	cs_activate(as, spi);

	as->keep_cs = false;

	msg->status = 0;
	msg->actual_length = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		ret = atmel_spi_one_transfer(master, msg, xfer);
		if (ret)
			goto msg_done;

		trace_spi_transfer_stop(msg, xfer);
	}

	if (as->use_pdc)
		atmel_spi_disable_pdc_transfer(as);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		dev_dbg(&spi->dev,
			"  xfer %p: len %u tx %p/%pad rx %p/%pad\n",
			xfer, xfer->len,
			xfer->tx_buf, &xfer->tx_dma,
			xfer->rx_buf, &xfer->rx_dma);
	}

msg_done:
	if (!as->keep_cs)
		cs_deactivate(as, msg->spi);

	atmel_spi_unlock(as);

	msg->status = as->done_status;
	spi_finalize_current_message(spi->master);

	return ret;
}

static void atmel_spi_cleanup(struct spi_device *spi)
{
	struct atmel_spi_device	*asd = spi->controller_state;

	if (!asd)
		return;

	spi->controller_state = NULL;
	kfree(asd);
}

static inline unsigned int atmel_get_version(struct atmel_spi *as)
{
	return spi_readl(as, VERSION) & 0x00000fff;
}

static void atmel_get_caps(struct atmel_spi *as)
{
	unsigned int version;

	version = atmel_get_version(as);

	as->caps.is_spi2 = version > 0x121;
	as->caps.has_wdrbt = version >= 0x210;
	as->caps.has_dma_support = version >= 0x212;
	as->caps.has_pdc_support = version < 0x212;
}
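
/*
 * For instance (per the version checks above), IP version 0x210 gives
 * is_spi2 and has_wdrbt with PDC-based transfers, while 0x212 and later
 * switch from the PDC to the generic DMA engine support.
 */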

static void atmel_spi_init(struct atmel_spi *as)
{
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */

	/* It is recommended to enable FIFOs first thing after reset */
	if (as->fifo_size)
		spi_writel(as, CR, SPI_BIT(FIFOEN));

	if (as->caps.has_wdrbt) {
		spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS)
				| SPI_BIT(MSTR));
	} else {
		spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
	}

	if (as->use_pdc)
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
	spi_writel(as, CR, SPI_BIT(SPIEN));
}

static int atmel_spi_probe(struct platform_device *pdev)
{
	struct resource		*regs;
	int			irq;
	struct clk		*clk;
	int			ret;
	struct spi_master	*master;
	struct atmel_spi	*as;

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	clk = devm_clk_get(&pdev->dev, "spi_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* setup spi core then atmel-specific driver state */
	master = spi_alloc_master(&pdev->dev, sizeof(*as));
	if (!master)
		return -ENOMEM;

	/* the spi->mode bits understood by this driver: */
	master->use_gpio_descriptors = true;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = pdev->id;
	master->num_chipselect = 4;
	master->setup = atmel_spi_setup;
	master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
	master->transfer_one_message = atmel_spi_transfer_one_message;
	master->cleanup = atmel_spi_cleanup;
	master->auto_runtime_pm = true;
	master->max_dma_len = SPI_MAX_DMA_XFER;
	master->can_dma = atmel_spi_can_dma;
	platform_set_drvdata(pdev, master);

	as = spi_master_get_devdata(master);

	spin_lock_init(&as->lock);

	as->pdev = pdev;
	as->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(as->regs)) {
		ret = PTR_ERR(as->regs);
		goto out_unmap_regs;
	}
	as->phybase = regs->start;
	as->irq = irq;
	as->clk = clk;

	init_completion(&as->xfer_completion);

	atmel_get_caps(as);

	as->use_dma = false;
	as->use_pdc = false;
	if (as->caps.has_dma_support) {
		ret = atmel_spi_configure_dma(master, as);
		if (ret == 0) {
			as->use_dma = true;
		} else if (ret == -EPROBE_DEFER) {
			/* don't leak the spi_master allocation on deferral */
			goto out_unmap_regs;
		}
	} else if (as->caps.has_pdc_support) {
		as->use_pdc = true;
	}

	if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
		as->addr_rx_bbuf = dma_alloc_coherent(&pdev->dev,
						      SPI_MAX_DMA_XFER,
						      &as->dma_addr_rx_bbuf,
						      GFP_KERNEL | GFP_DMA);
		if (!as->addr_rx_bbuf) {
			as->use_dma = false;
		} else {
			as->addr_tx_bbuf = dma_alloc_coherent(&pdev->dev,
					SPI_MAX_DMA_XFER,
					&as->dma_addr_tx_bbuf,
					GFP_KERNEL | GFP_DMA);
			if (!as->addr_tx_bbuf) {
				as->use_dma = false;
				dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
						  as->addr_rx_bbuf,
						  as->dma_addr_rx_bbuf);
			}
		}
		if (!as->use_dma)
			dev_info(master->dev.parent,
				 "  cannot allocate dma coherent memory\n");
	}

	if (as->caps.has_dma_support && !as->use_dma)
		dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");

	if (as->use_pdc) {
		ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pdc_interrupt,
					0, dev_name(&pdev->dev), master);
	} else {
		ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pio_interrupt,
					0, dev_name(&pdev->dev), master);
	}
	if (ret)
		goto out_unmap_regs;

	/* Initialize the hardware */
	ret = clk_prepare_enable(clk);
	if (ret)
		goto out_free_irq;

	as->spi_clk = clk_get_rate(clk);

	as->fifo_size = 0;
	if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
				  &as->fifo_size)) {
		dev_info(&pdev->dev, "Using FIFO (%u data)\n", as->fifo_size);
	}

	atmel_spi_init(as);

	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		goto out_free_dma;

	/* go! */
	dev_info(&pdev->dev, "Atmel SPI Controller version 0x%x at 0x%08lx (irq %d)\n",
			atmel_get_version(as), (unsigned long)regs->start,
			irq);

	return 0;

out_free_dma:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	if (as->use_dma)
		atmel_spi_release_dma(master);

	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	clk_disable_unprepare(clk);
out_free_irq:
out_unmap_regs:
	spi_master_put(master);
	return ret;
}

static int atmel_spi_remove(struct platform_device *pdev)
{
	struct spi_master	*master = platform_get_drvdata(pdev);
	struct atmel_spi	*as = spi_master_get_devdata(master);

	pm_runtime_get_sync(&pdev->dev);

	/* reset the hardware and block queue progress */
	if (as->use_dma) {
		atmel_spi_stop_dma(master);
		atmel_spi_release_dma(master);
		if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
			dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
					  as->addr_tx_bbuf,
					  as->dma_addr_tx_bbuf);
			dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
					  as->addr_rx_bbuf,
					  as->dma_addr_rx_bbuf);
		}
	}

	spin_lock_irq(&as->lock);
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	spi_readl(as, SR);
	spin_unlock_irq(&as->lock);

	clk_disable_unprepare(as->clk);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM
static int atmel_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct atmel_spi *as = spi_master_get_devdata(master);

	clk_disable_unprepare(as->clk);
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int atmel_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct atmel_spi *as = spi_master_get_devdata(master);

	pinctrl_pm_select_default_state(dev);

	return clk_prepare_enable(as->clk);
}

#ifdef CONFIG_PM_SLEEP
static int atmel_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	/* Stop the queue running */
	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		atmel_spi_runtime_suspend(dev);

	return 0;
}

static int atmel_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct atmel_spi *as = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(as->clk);
	if (ret)
		return ret;

	atmel_spi_init(as);

	clk_disable_unprepare(as->clk);

	if (!pm_runtime_suspended(dev)) {
		ret = atmel_spi_runtime_resume(dev);
		if (ret)
			return ret;
	}

	/* Start the queue running */
	return spi_master_resume(master);
}
#endif

static const struct dev_pm_ops atmel_spi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
	SET_RUNTIME_PM_OPS(atmel_spi_runtime_suspend,
			   atmel_spi_runtime_resume, NULL)
};
#define ATMEL_SPI_PM_OPS	(&atmel_spi_pm_ops)
#else
#define ATMEL_SPI_PM_OPS	NULL
#endif

static const struct of_device_id atmel_spi_dt_ids[] = {
	{ .compatible = "atmel,at91rm9200-spi" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);

static struct platform_driver atmel_spi_driver = {
	.driver		= {
		.name	= "atmel_spi",
		.pm	= ATMEL_SPI_PM_OPS,
		.of_match_table	= atmel_spi_dt_ids,
	},
	.probe		= atmel_spi_probe,
	.remove		= atmel_spi_remove,
};
module_platform_driver(atmel_spi_driver);

MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_spi");