// SPDX-License-Identifier: GPL-2.0-only
/*
 * Designware SPI core controller driver (refer to pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

#include "spi-dw.h"

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

/* Slave spi_dev related */
struct chip_data {
        u8 tmode;               /* TR/TO/RO/EEPROM */
        u8 type;                /* SPI/SSP/MicroWire */

        u8 poll_mode;           /* 1 means use poll mode */

        u16 clk_div;            /* baud rate divider */
        u32 speed_hz;           /* baud rate */
        void (*cs_control)(u32 command);
};

#ifdef CONFIG_DEBUG_FS
#define SPI_REGS_BUFSIZE        1024
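/* Dump the controller registers as text for the debugfs "registers" file */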
static ssize_t dw_spi_show_regs(struct file *file, char __user *user_buf,
                size_t count, loff_t *ppos)
{
        struct dw_spi *dws = file->private_data;
        char *buf;
        u32 len = 0;
        ssize_t ret;

        buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "%s registers:\n", dev_name(&dws->master->dev));
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "=================================\n");
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0));
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1));
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR));
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER));
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR));
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR));
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR));
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR));
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR));
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR));
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR));
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR));
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR));
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR));
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR));
        len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "=================================\n");

        ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
        kfree(buf);
        return ret;
}

static const struct file_operations dw_spi_regs_ops = {
        .owner          = THIS_MODULE,
        .open           = simple_open,
        .read           = dw_spi_show_regs,
        .llseek         = default_llseek,
};

static int dw_spi_debugfs_init(struct dw_spi *dws)
{
        char name[32];

        snprintf(name, 32, "dw_spi%d", dws->master->bus_num);
        dws->debugfs = debugfs_create_dir(name, NULL);
        if (!dws->debugfs)
                return -ENOMEM;

        debugfs_create_file("registers", S_IFREG | S_IRUGO,
                dws->debugfs, (void *)dws, &dw_spi_regs_ops);
        return 0;
}

static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
        debugfs_remove_recursive(dws->debugfs);
}

#else
static inline int dw_spi_debugfs_init(struct dw_spi *dws)
{
        return 0;
}

static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */

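/*
 * SPI core ->set_cs() callback: invoke the optional board-specific
 * cs_control() hook, then program the slave-enable (SER) register. The SER
 * bit is only cleared explicitly when the controller has the CS-override
 * fixup (see spi_hw_init()).
 */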
void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
        struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
        struct chip_data *chip = spi_get_ctldata(spi);

        if (chip && chip->cs_control)
                chip->cs_control(enable);

        if (enable)
                dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
        else if (dws->cs_override)
                dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_GPL(dw_spi_set_cs);

/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
        u32 tx_left, tx_room, rxtx_gap;

        tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
        tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);

        /*
         * Another concern is the tx/rx mismatch: we thought of using
         * (dws->fifo_len - rxflr - txflr) as the maximum for tx, but it
         * doesn't cover the data that is out of the tx/rx fifos and still
         * inside the shift registers, so the limit is enforced from the
         * software point of view instead.
         */
        rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx))
                        / dws->n_bytes;

        return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap));
}

/* Return the max entries we should read out of rx fifo */
static inline u32 rx_max(struct dw_spi *dws)
{
        u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;

        return min_t(u32, rx_left, dw_readl(dws, DW_SPI_RXFLR));
}

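/* Fill the TX FIFO with as many data words as tx_max() says will fit */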
static void dw_writer(struct dw_spi *dws)
{
        u32 max = tx_max(dws);
        u16 txw = 0;

        while (max--) {
                /* Set the tx word if the transfer's original "tx" is not null */
                if (dws->tx_end - dws->len) {
                        if (dws->n_bytes == 1)
                                txw = *(u8 *)(dws->tx);
                        else
                                txw = *(u16 *)(dws->tx);
                }
                dw_write_io_reg(dws, DW_SPI_DR, txw);
                dws->tx += dws->n_bytes;
        }
}

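/* Drain up to rx_max() data words from the RX FIFO into the receive buffer */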
static void dw_reader(struct dw_spi *dws)
{
        u32 max = rx_max(dws);
        u16 rxw;

        while (max--) {
                rxw = dw_read_io_reg(dws, DW_SPI_DR);
                /* Care about rx only if the transfer's original "rx" is not null */
                if (dws->rx_end - dws->len) {
                        if (dws->n_bytes == 1)
                                *(u8 *)(dws->rx) = rxw;
                        else
                                *(u16 *)(dws->rx) = rxw;
                }
                dws->rx += dws->n_bytes;
        }
}

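/*
 * Abort the transfer in progress: reset the controller, log the error and
 * complete the current message with -EIO.
 */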
static void int_error_stop(struct dw_spi *dws, const char *msg)
{
        spi_reset_chip(dws);

        dev_err(&dws->master->dev, "%s\n", msg);
        dws->master->cur_msg->status = -EIO;
        spi_finalize_current_transfer(dws->master);
}

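/*
 * Interrupt-driven (PIO) transfer handler: abort on FIFO overrun/underrun,
 * otherwise drain the RX FIFO and, while data remains, refill the TX FIFO
 * with the TX-empty interrupt temporarily masked.
 */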
static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
        u16 irq_status = dw_readl(dws, DW_SPI_ISR);

        /* Error handling */
        if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
                dw_readl(dws, DW_SPI_ICR);
                int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
                return IRQ_HANDLED;
        }

        dw_reader(dws);
        if (dws->rx_end == dws->rx) {
                spi_mask_intr(dws, SPI_INT_TXEI);
                spi_finalize_current_transfer(dws->master);
                return IRQ_HANDLED;
        }
        if (irq_status & SPI_INT_TXEI) {
                spi_mask_intr(dws, SPI_INT_TXEI);
                dw_writer(dws);
                /* Re-enable the TX irq; it is masked again once RX is finished */
                spi_umask_intr(dws, SPI_INT_TXEI);
        }

        return IRQ_HANDLED;
}

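/*
 * Top-level IRQ handler (the line may be shared): ignore spurious
 * interrupts, then dispatch to the handler installed for the current
 * transfer.
 */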
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
        struct spi_controller *master = dev_id;
        struct dw_spi *dws = spi_controller_get_devdata(master);
        u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;

        if (!irq_status)
                return IRQ_NONE;

        if (!master->cur_msg) {
                spi_mask_intr(dws, SPI_INT_TXEI);
                return IRQ_HANDLED;
        }

        return dws->transfer_handler(dws);
}

/* Busy-wait the transfer to completion; called from dw_spi_transfer_one() in poll mode */
static int poll_transfer(struct dw_spi *dws)
{
        do {
                dw_writer(dws);
                dw_reader(dws);
                cpu_relax();
        } while (dws->rx_end > dws->rx);

        return 0;
}

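/*
 * SPI core ->transfer_one() callback: program the clock divider, frame size
 * and transfer mode for this transfer, then run it in DMA, interrupt (PIO)
 * or poll mode depending on the configuration.
 */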
static int dw_spi_transfer_one(struct spi_controller *master,
                struct spi_device *spi, struct spi_transfer *transfer)
{
        struct dw_spi *dws = spi_controller_get_devdata(master);
        struct chip_data *chip = spi_get_ctldata(spi);
        u8 imask = 0;
        u16 txlevel = 0;
        u32 cr0;
        int ret;

        dws->dma_mapped = 0;

        dws->tx = (void *)transfer->tx_buf;
        dws->tx_end = dws->tx + transfer->len;
        dws->rx = transfer->rx_buf;
        dws->rx_end = dws->rx + transfer->len;
        dws->len = transfer->len;

        spi_enable_chip(dws, 0);

        /* Handle per transfer options for bpw and speed */
        if (transfer->speed_hz != dws->current_freq) {
                if (transfer->speed_hz != chip->speed_hz) {
                        /* clk_div doesn't support odd numbers */
                        chip->clk_div = (DIV_ROUND_UP(dws->max_freq, transfer->speed_hz) + 1) & 0xfffe;
                        chip->speed_hz = transfer->speed_hz;
                }
                dws->current_freq = transfer->speed_hz;
                spi_set_clk(dws, chip->clk_div);
        }

        dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
        dws->dma_width = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);

        /* Default SPI mode is SCPOL = 0, SCPH = 0 */
        cr0 = (transfer->bits_per_word - 1)
                | (chip->type << SPI_FRF_OFFSET)
                | ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) |
                   (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET))
                | (chip->tmode << SPI_TMOD_OFFSET);

        /*
         * Adjust transfer mode if necessary. Requires platform dependent
         * chipselect mechanism.
         */
        if (chip->cs_control) {
                if (dws->rx && dws->tx)
                        chip->tmode = SPI_TMOD_TR;
                else if (dws->rx)
                        chip->tmode = SPI_TMOD_RO;
                else
                        chip->tmode = SPI_TMOD_TO;

                cr0 &= ~SPI_TMOD_MASK;
                cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
        }

        dw_writel(dws, DW_SPI_CTRL0, cr0);

        /* Check if current transfer is a DMA transaction */
        if (master->can_dma && master->can_dma(master, spi, transfer))
                dws->dma_mapped = master->cur_msg_mapped;

        /* For poll mode just disable all interrupts */
        spi_mask_intr(dws, 0xff);

        /*
         * Interrupt mode:
         * only the TXEI IRQ needs to be set, as TX and RX always happen
         * synchronously.
         */
        if (dws->dma_mapped) {
                ret = dws->dma_ops->dma_setup(dws, transfer);
                if (ret < 0) {
                        spi_enable_chip(dws, 1);
                        return ret;
                }
        } else if (!chip->poll_mode) {
                txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes);
                dw_writel(dws, DW_SPI_TXFLTR, txlevel);

                /* Set the interrupt mask */
                imask |= SPI_INT_TXEI | SPI_INT_TXOI |
                         SPI_INT_RXUI | SPI_INT_RXOI;
                spi_umask_intr(dws, imask);

                dws->transfer_handler = interrupt_transfer;
        }

        spi_enable_chip(dws, 1);

        if (dws->dma_mapped) {
                ret = dws->dma_ops->dma_transfer(dws, transfer);
                if (ret < 0)
                        return ret;
        }

        if (chip->poll_mode)
                return poll_transfer(dws);

        return 1;
}

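/* Error path for the SPI core: stop any DMA in flight and reset the chip */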
static void dw_spi_handle_err(struct spi_controller *master,
                struct spi_message *msg)
{
        struct dw_spi *dws = spi_controller_get_devdata(master);

        if (dws->dma_mapped)
                dws->dma_ops->dma_stop(dws);

        spi_reset_chip(dws);
}

/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
        struct dw_spi_chip *chip_info = NULL;
        struct chip_data *chip;

        /* Only alloc on first setup */
        chip = spi_get_ctldata(spi);
        if (!chip) {
                chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
                if (!chip)
                        return -ENOMEM;
                spi_set_ctldata(spi, chip);
        }

        /*
         * Protocol drivers may change the chip settings, so if chip_info
         * exists, use it.
         */
        chip_info = spi->controller_data;

        /* chip_info doesn't always exist */
        if (chip_info) {
                if (chip_info->cs_control)
                        chip->cs_control = chip_info->cs_control;

                chip->poll_mode = chip_info->poll_mode;
                chip->type = chip_info->type;
        }

        chip->tmode = SPI_TMOD_TR;

        return 0;
}

static void dw_spi_cleanup(struct spi_device *spi)
{
        struct chip_data *chip = spi_get_ctldata(spi);

        kfree(chip);
        spi_set_ctldata(spi, NULL);
}

/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct device *dev, struct dw_spi *dws)
{
        spi_reset_chip(dws);

        /*
         * Try to detect the FIFO depth if not set by the interface driver;
         * per the HW spec the depth can be from 2 to 256.
         */
        if (!dws->fifo_len) {
                u32 fifo;

                for (fifo = 1; fifo < 256; fifo++) {
                        dw_writel(dws, DW_SPI_TXFLTR, fifo);
                        if (fifo != dw_readl(dws, DW_SPI_TXFLTR))
                                break;
                }
                dw_writel(dws, DW_SPI_TXFLTR, 0);

                dws->fifo_len = (fifo == 1) ? 0 : fifo;
                dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
        }

        /* Enable the HW fixup for explicit CS deselect on Amazon's Alpine chips */
        if (dws->cs_override)
                dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}

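/*
 * Allocate and register an SPI controller for a DW SSI device. Interface
 * drivers fill in the dw_spi structure and call this from their probe
 * routines.
 */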
int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
        struct spi_controller *master;
        int ret;

        BUG_ON(dws == NULL);

        master = spi_alloc_master(dev, 0);
        if (!master)
                return -ENOMEM;

        dws->master = master;
        dws->type = SSI_MOTO_SPI;
        dws->dma_inited = 0;
        dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);

        spi_controller_set_devdata(master, dws);

        ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
                          master);
        if (ret < 0) {
                dev_err(dev, "cannot get IRQ\n");
                goto err_free_master;
        }

        master->use_gpio_descriptors = true;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
        master->bus_num = dws->bus_num;
        master->num_chipselect = dws->num_cs;
        master->setup = dw_spi_setup;
        master->cleanup = dw_spi_cleanup;
        master->set_cs = dw_spi_set_cs;
        master->transfer_one = dw_spi_transfer_one;
        master->handle_err = dw_spi_handle_err;
        master->max_speed_hz = dws->max_freq;
        master->dev.of_node = dev->of_node;
        master->dev.fwnode = dev->fwnode;
        master->flags = SPI_MASTER_GPIO_SS;

        if (dws->set_cs)
                master->set_cs = dws->set_cs;

        /* Basic HW init */
        spi_hw_init(dev, dws);

        if (dws->dma_ops && dws->dma_ops->dma_init) {
                ret = dws->dma_ops->dma_init(dws);
                if (ret) {
                        dev_warn(dev, "DMA init failed\n");
                        dws->dma_inited = 0;
                } else {
                        master->can_dma = dws->dma_ops->can_dma;
                }
        }

        ret = devm_spi_register_controller(dev, master);
        if (ret) {
                dev_err(&master->dev, "problem registering spi master\n");
                goto err_dma_exit;
        }

        dw_spi_debugfs_init(dws);
        return 0;

err_dma_exit:
        if (dws->dma_ops && dws->dma_ops->dma_exit)
                dws->dma_ops->dma_exit(dws);
        spi_enable_chip(dws, 0);
        free_irq(dws->irq, master);
err_free_master:
        spi_controller_put(master);
        return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_add_host);

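/* Tear down everything set up by dw_spi_add_host() */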
void dw_spi_remove_host(struct dw_spi *dws)
{
        dw_spi_debugfs_remove(dws);

        if (dws->dma_ops && dws->dma_ops->dma_exit)
                dws->dma_ops->dma_exit(dws);

        spi_shutdown_chip(dws);

        free_irq(dws->irq, dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);

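/*
 * PM helpers for interface drivers: quiesce the controller on suspend and
 * reinitialize it on resume.
 */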
int dw_spi_suspend_host(struct dw_spi *dws)
{
        int ret;

        ret = spi_controller_suspend(dws->master);
        if (ret)
                return ret;

        spi_shutdown_chip(dws);
        return 0;
}
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);

int dw_spi_resume_host(struct dw_spi *dws)
{
        spi_hw_init(&dws->master->dev, dws);
        return spi_controller_resume(dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_resume_host);

MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");