Lines Matching +full:spi +full:- +full:rx +full:- +full:delay +full:- +full:us
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Driver for Broadcom BCM2835 SPI Controllers
10 * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
11 * spi-atmel.c, Copyright (C) 2006 Atmel Corporation
17 #include <linux/delay.h>
18 #include <linux/dma-mapping.h>
32 #include <linux/spi/spi.h>
34 /* SPI register offsets */
75 #define DRV_NAME "spi-bcm2835"
81 "time in us to run a transfer in polling mode\n");
84 * struct bcm2835_spi - BCM2835 SPI controller
87 * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
88 * @tfr: SPI transfer currently processed
95 * @rx_prologue: bytes received without DMA if first RX sglist entry's
98 * @prepare_cs: precalculated CS register value for ->prepare_message()
99 * (uses slave-specific clock polarity and phase settings)
100 * @debugfs_dir: the debugfs directory - needed to remove debugfs when
109 * @chip_select: SPI slave currently selected
112 * @rx_dma_active: whether a RX DMA descriptor is in progress
114 * @fill_tx_desc: preallocated TX DMA descriptor used for RX-only transfers
117 * @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
118 * (cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
120 * @clear_rx_cs: precalculated CS register value to clear RX FIFO
121 * (uses slave-specific clock polarity and phase settings)
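/*
 * Condensed, illustrative sketch of the structure documented above; this is
 * not the file's exact definition, and the field types are assumptions
 * inferred from the kernel-doc and from how the fields are used further down:
 *
 *	struct bcm2835_spi {
 *		int				irq;
 *		struct spi_transfer		*tfr;
 *		int				tx_prologue, rx_prologue;
 *		bool				tx_spillover;
 *		u32				prepare_cs[BCM2835_SPI_NUM_CS];
 *		struct dentry			*debugfs_dir;
 *		u64				count_transfer_polling;
 *		u64				count_transfer_irq;
 *		u64				count_transfer_irq_after_polling;
 *		u64				count_transfer_dma;
 *		u8				chip_select;
 *		unsigned int			tx_dma_active, rx_dma_active;
 *		struct dma_async_tx_descriptor	*fill_tx_desc;
 *		struct dma_async_tx_descriptor	*clear_rx_desc[BCM2835_SPI_NUM_CS];
 *		u32				clear_rx_cs[BCM2835_SPI_NUM_CS];
 *	};
 */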
161 snprintf(name, sizeof(name), "spi-bcm2835-%s", dname); in bcm2835_debugfs_create()
165 bs->debugfs_dir = dir; in bcm2835_debugfs_create()
169 &bs->count_transfer_polling); in bcm2835_debugfs_create()
171 &bs->count_transfer_irq); in bcm2835_debugfs_create()
173 &bs->count_transfer_irq_after_polling); in bcm2835_debugfs_create()
175 &bs->count_transfer_dma); in bcm2835_debugfs_create()
180 debugfs_remove_recursive(bs->debugfs_dir); in bcm2835_debugfs_remove()
181 bs->debugfs_dir = NULL; in bcm2835_debugfs_remove()
196 return readl(bs->regs + reg); in bcm2835_rd()
201 writel(val, bs->regs + reg); in bcm2835_wr()
208 while ((bs->rx_len) && in bcm2835_rd_fifo()
211 if (bs->rx_buf) in bcm2835_rd_fifo()
212 *bs->rx_buf++ = byte; in bcm2835_rd_fifo()
213 bs->rx_len--; in bcm2835_rd_fifo()
221 while ((bs->tx_len) && in bcm2835_wr_fifo()
223 byte = bs->tx_buf ? *bs->tx_buf++ : 0; in bcm2835_wr_fifo()
225 bs->tx_len--; in bcm2835_wr_fifo()
230 * bcm2835_rd_fifo_count() - blindly read exactly @count bytes from RX FIFO
231 * @bs: BCM2835 SPI controller
232 * @count: bytes to read from RX FIFO
234 * The caller must ensure that @bs->rx_len is greater than or equal to @count,
235 * that the RX FIFO contains at least @count bytes and that the DMA Enable flag
237 * 32-bit instead of just 8-bit). Moreover @bs->rx_buf must not be %NULL.
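/*
 * Illustrative note for the loop below: with @count == 6, the first 32-bit
 * FIFO word supplies 4 bytes and the second the remaining 2, so exactly
 * 6 bytes land in rx_buf even though each FIFO access returns a full word
 * while the DMA Enable flag is set.
 */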
244 bs->rx_len -= count; in bcm2835_rd_fifo_count()
249 memcpy(bs->rx_buf, &val, len); in bcm2835_rd_fifo_count()
250 bs->rx_buf += len; in bcm2835_rd_fifo_count()
251 count -= 4; in bcm2835_rd_fifo_count()
256 * bcm2835_wr_fifo_count() - blindly write exactly @count bytes to TX FIFO
257 * @bs: BCM2835 SPI controller
260 * The caller must ensure that @bs->tx_len is greater than or equal to @count,
263 * 32-bit instead of just 8-bit).
270 bs->tx_len -= count; in bcm2835_wr_fifo_count()
273 if (bs->tx_buf) { in bcm2835_wr_fifo_count()
275 memcpy(&val, bs->tx_buf, len); in bcm2835_wr_fifo_count()
276 bs->tx_buf += len; in bcm2835_wr_fifo_count()
281 count -= 4; in bcm2835_wr_fifo_count()
286 * bcm2835_wait_tx_fifo_empty() - busy-wait for TX FIFO to empty
287 * @bs: BCM2835 SPI controller
289 * The caller must ensure that the RX FIFO can accommodate as many bytes
291 * RX FIFO is full, causing this function to spin forever.
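/*
 * Minimal sketch of such a busy-wait, assuming the driver's register
 * accessor and the controller's DONE status bit (set once the TX FIFO has
 * drained); the function body itself is not among the matched lines:
 *
 *	while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
 *		cpu_relax();
 */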
300 * bcm2835_rd_fifo_blind() - blindly read up to @count bytes from RX FIFO
301 * @bs: BCM2835 SPI controller
302 * @count: bytes available for reading in RX FIFO
308 count = min(count, bs->rx_len); in bcm2835_rd_fifo_blind()
309 bs->rx_len -= count; in bcm2835_rd_fifo_blind()
313 if (bs->rx_buf) in bcm2835_rd_fifo_blind()
314 *bs->rx_buf++ = val; in bcm2835_rd_fifo_blind()
315 count--; in bcm2835_rd_fifo_blind()
320 * bcm2835_wr_fifo_blind() - blindly write up to @count bytes to TX FIFO
321 * @bs: BCM2835 SPI controller
328 count = min(count, bs->tx_len); in bcm2835_wr_fifo_blind()
329 bs->tx_len -= count; in bcm2835_wr_fifo_blind()
332 val = bs->tx_buf ? *bs->tx_buf++ : 0; in bcm2835_wr_fifo_blind()
334 count--; in bcm2835_wr_fifo_blind()
343 /* Disable SPI interrupts and transfer */ in bcm2835_spi_reset_hw()
355 /* and reset RX/TX FIFOS */ in bcm2835_spi_reset_hw()
372 * or if RXR is set (RX FIFO >= ¾ full). in bcm2835_spi_interrupt()
379 if (bs->tx_len && cs & BCM2835_SPI_CS_DONE) in bcm2835_spi_interrupt()
387 if (!bs->rx_len) { in bcm2835_spi_interrupt()
388 /* Transfer complete - reset SPI HW */ in bcm2835_spi_interrupt()
391 complete(&ctlr->xfer_completion); in bcm2835_spi_interrupt()
398 struct spi_device *spi, in bcm2835_spi_transfer_one_irq() argument
405 bs->count_transfer_irq++; in bcm2835_spi_transfer_one_irq()
427 * bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
428 * @ctlr: SPI master controller
429 * @tfr: SPI transfer
430 * @bs: BCM2835 SPI controller
435 * SPI controller deduces its intended size from the DLEN register.
437 * If a TX or RX sglist contains multiple entries, one per page, and the first
449 * rounded up to a multiple of 4 bytes by transmitting surplus bytes, an RX
453 * E.g. if the first TX sglist entry's length is 23 and the first RX's is 42,
454 * write 3 bytes to the TX FIFO but read only 2 bytes from the RX FIFO.
455 * The residue of 1 byte in the RX FIFO is picked up by DMA. Together with
456 * the rest of the first RX sglist entry it makes up a multiple of 4 bytes.
458 * Should the RX prologue be larger, say, 3 vis-à-vis a TX prologue of 1,
459 * write 1 + 4 = 5 bytes to the TX FIFO and read 3 bytes from the RX FIFO.
466 * The FIFO is normally accessed with 8-bit width by the CPU and 32-bit width
469 * be transmitted in 32-bit width to ensure that the following DMA transfer can
470 * pick up the residue in the RX FIFO in ungarbled form.
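/*
 * Worked example of the arithmetic described above (illustrative only):
 *
 *	first TX sglist entry length = 23  ->  tx_prologue = 23 % 4 = 3
 *	first RX sglist entry length = 42  ->  rx_prologue = 42 % 4 = 2
 *
 * Thus 3 bytes are transmitted and 2 received without DMA; the first RX
 * entry shrinks from 42 to 40 bytes (a multiple of 4) and the single byte
 * left in the RX FIFO is later picked up by DMA.  In the opposite case
 * (rx_prologue = 3, tx_prologue = 1), tx_prologue is raised to 1 + 4 = 5,
 * which is why 4 surplus bytes must afterwards be trimmed from the first
 * TX sglist entry or spilled over into the second one.
 */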
479 bs->tfr = tfr; in bcm2835_spi_transfer_prologue()
480 bs->tx_prologue = 0; in bcm2835_spi_transfer_prologue()
481 bs->rx_prologue = 0; in bcm2835_spi_transfer_prologue()
482 bs->tx_spillover = false; in bcm2835_spi_transfer_prologue()
484 if (bs->tx_buf && !sg_is_last(&tfr->tx_sg.sgl[0])) in bcm2835_spi_transfer_prologue()
485 bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3; in bcm2835_spi_transfer_prologue()
487 if (bs->rx_buf && !sg_is_last(&tfr->rx_sg.sgl[0])) { in bcm2835_spi_transfer_prologue()
488 bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3; in bcm2835_spi_transfer_prologue()
490 if (bs->rx_prologue > bs->tx_prologue) { in bcm2835_spi_transfer_prologue()
491 if (!bs->tx_buf || sg_is_last(&tfr->tx_sg.sgl[0])) { in bcm2835_spi_transfer_prologue()
492 bs->tx_prologue = bs->rx_prologue; in bcm2835_spi_transfer_prologue()
494 bs->tx_prologue += 4; in bcm2835_spi_transfer_prologue()
495 bs->tx_spillover = in bcm2835_spi_transfer_prologue()
496 !(sg_dma_len(&tfr->tx_sg.sgl[0]) & ~3); in bcm2835_spi_transfer_prologue()
502 if (!bs->tx_prologue) in bcm2835_spi_transfer_prologue()
505 /* Write and read RX prologue. Adjust first entry in RX sglist. */ in bcm2835_spi_transfer_prologue()
506 if (bs->rx_prologue) { in bcm2835_spi_transfer_prologue()
507 bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue); in bcm2835_spi_transfer_prologue()
510 bcm2835_wr_fifo_count(bs, bs->rx_prologue); in bcm2835_spi_transfer_prologue()
512 bcm2835_rd_fifo_count(bs, bs->rx_prologue); in bcm2835_spi_transfer_prologue()
517 dma_sync_single_for_device(ctlr->dma_rx->device->dev, in bcm2835_spi_transfer_prologue()
518 sg_dma_address(&tfr->rx_sg.sgl[0]), in bcm2835_spi_transfer_prologue()
519 bs->rx_prologue, DMA_FROM_DEVICE); in bcm2835_spi_transfer_prologue()
521 sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue; in bcm2835_spi_transfer_prologue()
522 sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue; in bcm2835_spi_transfer_prologue()
525 if (!bs->tx_buf) in bcm2835_spi_transfer_prologue()
532 tx_remaining = bs->tx_prologue - bs->rx_prologue; in bcm2835_spi_transfer_prologue()
543 if (likely(!bs->tx_spillover)) { in bcm2835_spi_transfer_prologue()
544 sg_dma_address(&tfr->tx_sg.sgl[0]) += bs->tx_prologue; in bcm2835_spi_transfer_prologue()
545 sg_dma_len(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue; in bcm2835_spi_transfer_prologue()
547 sg_dma_len(&tfr->tx_sg.sgl[0]) = 0; in bcm2835_spi_transfer_prologue()
548 sg_dma_address(&tfr->tx_sg.sgl[1]) += 4; in bcm2835_spi_transfer_prologue()
549 sg_dma_len(&tfr->tx_sg.sgl[1]) -= 4; in bcm2835_spi_transfer_prologue()
554 * bcm2835_spi_undo_prologue() - reconstruct original sglist state
555 * @bs: BCM2835 SPI controller
557 * Undo changes which were made to an SPI transfer's sglist when transmitting
563 struct spi_transfer *tfr = bs->tfr; in bcm2835_spi_undo_prologue()
565 if (!bs->tx_prologue) in bcm2835_spi_undo_prologue()
568 if (bs->rx_prologue) { in bcm2835_spi_undo_prologue()
569 sg_dma_address(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue; in bcm2835_spi_undo_prologue()
570 sg_dma_len(&tfr->rx_sg.sgl[0]) += bs->rx_prologue; in bcm2835_spi_undo_prologue()
573 if (!bs->tx_buf) in bcm2835_spi_undo_prologue()
576 if (likely(!bs->tx_spillover)) { in bcm2835_spi_undo_prologue()
577 sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue; in bcm2835_spi_undo_prologue()
578 sg_dma_len(&tfr->tx_sg.sgl[0]) += bs->tx_prologue; in bcm2835_spi_undo_prologue()
580 sg_dma_len(&tfr->tx_sg.sgl[0]) = bs->tx_prologue - 4; in bcm2835_spi_undo_prologue()
581 sg_dma_address(&tfr->tx_sg.sgl[1]) -= 4; in bcm2835_spi_undo_prologue()
582 sg_dma_len(&tfr->tx_sg.sgl[1]) += 4; in bcm2835_spi_undo_prologue()
585 bs->tx_prologue = 0; in bcm2835_spi_undo_prologue()
589 * bcm2835_spi_dma_rx_done() - callback for DMA RX channel
590 * @data: SPI master controller
592 * Used for bidirectional and RX-only transfers.
599 /* terminate tx-dma as we do not have an irq for it in bcm2835_spi_dma_rx_done()
600 * because when the rx dma terminates and this callback in bcm2835_spi_dma_rx_done()
601 * is called, the tx-dma must have finished - can't get to this in bcm2835_spi_dma_rx_done()
604 dmaengine_terminate_async(ctlr->dma_tx); in bcm2835_spi_dma_rx_done()
605 bs->tx_dma_active = false; in bcm2835_spi_dma_rx_done()
606 bs->rx_dma_active = false; in bcm2835_spi_dma_rx_done()
613 complete(&ctlr->xfer_completion); in bcm2835_spi_dma_rx_done()
617 * bcm2835_spi_dma_tx_done() - callback for DMA TX channel
618 * @data: SPI master controller
620 * Used for TX-only transfers.
627 /* busy-wait for TX FIFO to empty */ in bcm2835_spi_dma_tx_done()
630 bs->clear_rx_cs[bs->chip_select]); in bcm2835_spi_dma_tx_done()
632 bs->tx_dma_active = false; in bcm2835_spi_dma_tx_done()
636 * In case of a very short transfer, RX DMA may not have been in bcm2835_spi_dma_tx_done()
640 if (cmpxchg(&bs->rx_dma_active, true, false)) in bcm2835_spi_dma_tx_done()
641 dmaengine_terminate_async(ctlr->dma_rx); in bcm2835_spi_dma_tx_done()
645 complete(&ctlr->xfer_completion); in bcm2835_spi_dma_tx_done()
649 * bcm2835_spi_prepare_sg() - prepare and submit DMA descriptor for sglist
650 * @ctlr: SPI master controller
651 * @spi: SPI slave
652 * @tfr: SPI transfer
653 * @bs: BCM2835 SPI controller
654 * @is_tx: whether to submit DMA descriptor for TX or RX sglist
656 * Prepare and submit a DMA descriptor for the TX or RX sglist of @tfr.
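/*
 * Generic shape of such a descriptor setup using the standard dmaengine
 * API (a sketch only; the channel selection and error handling implemented
 * below are abbreviated here):
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
 *	if (!desc)
 *		return -EINVAL;
 *	desc->callback = ...;		// completion handler, if any
 *	desc->callback_param = ctlr;
 *	cookie = dmaengine_submit(desc);
 *	return dma_submit_error(cookie);
 */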
660 struct spi_device *spi, in bcm2835_spi_prepare_sg() argument
676 chan = ctlr->dma_tx; in bcm2835_spi_prepare_sg()
677 nents = tfr->tx_sg.nents; in bcm2835_spi_prepare_sg()
678 sgl = tfr->tx_sg.sgl; in bcm2835_spi_prepare_sg()
679 flags = tfr->rx_buf ? 0 : DMA_PREP_INTERRUPT; in bcm2835_spi_prepare_sg()
682 chan = ctlr->dma_rx; in bcm2835_spi_prepare_sg()
683 nents = tfr->rx_sg.nents; in bcm2835_spi_prepare_sg()
684 sgl = tfr->rx_sg.sgl; in bcm2835_spi_prepare_sg()
690 return -EINVAL; in bcm2835_spi_prepare_sg()
693 * Completion is signaled by the RX channel for bidirectional and in bcm2835_spi_prepare_sg()
694 * RX-only transfers; else by the TX channel for TX-only transfers. in bcm2835_spi_prepare_sg()
697 desc->callback = bcm2835_spi_dma_rx_done; in bcm2835_spi_prepare_sg()
698 desc->callback_param = ctlr; in bcm2835_spi_prepare_sg()
699 } else if (!tfr->rx_buf) { in bcm2835_spi_prepare_sg()
700 desc->callback = bcm2835_spi_dma_tx_done; in bcm2835_spi_prepare_sg()
701 desc->callback_param = ctlr; in bcm2835_spi_prepare_sg()
702 bs->chip_select = spi->chip_select; in bcm2835_spi_prepare_sg()
705 /* submit it to DMA-engine */ in bcm2835_spi_prepare_sg()
712 * bcm2835_spi_transfer_one_dma() - perform SPI transfer using DMA engine
713 * @ctlr: SPI master controller
714 * @spi: SPI slave
715 * @tfr: SPI transfer
718 * For *bidirectional* transfers (both tx_buf and rx_buf are non-%NULL), set up
719 * the TX and RX DMA channel to copy between memory and FIFO register.
721 * For *TX-only* transfers (rx_buf is %NULL), copying the RX FIFO's contents to
722 * memory is pointless. However not reading the RX FIFO isn't an option either
724 * clear the RX FIFO by setting the CLEAR_RX bit in the CS register.
729 * when performing a TX-only transfer is to submit this descriptor to the RX
734 * Clearing the RX FIFO is paced by the DREQ signal. The signal is asserted
735 * when the RX FIFO becomes half full, i.e. 32 bytes. (Tuneable with the DC
736 * register.) Reading 32 bytes from the RX FIFO would normally require 8 bus
737 * accesses, whereas clearing it requires only 1 bus access. So an 8-fold
740 * For *RX-only* transfers (tx_buf is %NULL), fill the TX FIFO by cyclically
742 * in bcm2835_dma_init(). It must be terminated once the RX DMA channel is
748 * feature is not available on so-called "lite" channels, but normally TX DMA
749 * is backed by a full-featured channel.
751 * Zero-filling the TX FIFO is paced by the DREQ signal. Unfortunately the
752 * BCM2835 SPI controller continues to assert DREQ even after the DLEN register
754 * has finished, the DMA engine zero-fills the TX FIFO until it is half full.
756 * performed at the end of an RX-only transfer.
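/*
 * Minimal sketch of the "preallocated reusable cyclic descriptor" pattern
 * described above, expressed with the generic dmaengine API; chan, dma_addr
 * and period are placeholders rather than the driver's own variables:
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, dma_addr, period, period,
 *					 DMA_MEM_TO_DEV, 0);
 *	if (!desc || dmaengine_desc_set_reuse(desc))
 *		return -ENOMEM;		// fall back to non-DMA mode
 *
 *	// per transfer: resubmit the same descriptor, start the channel,
 *	// and terminate it once the paired data transfer has completed
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *	...
 *	dmaengine_terminate_async(chan);
 */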
759 struct spi_device *spi, in bcm2835_spi_transfer_one_dma() argument
768 bs->count_transfer_dma++; in bcm2835_spi_transfer_one_dma()
771 * Transfer first few bytes without DMA if length of first TX or RX in bcm2835_spi_transfer_one_dma()
776 /* setup tx-DMA */ in bcm2835_spi_transfer_one_dma()
777 if (bs->tx_buf) { in bcm2835_spi_transfer_one_dma()
778 ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, true); in bcm2835_spi_transfer_one_dma()
780 cookie = dmaengine_submit(bs->fill_tx_desc); in bcm2835_spi_transfer_one_dma()
787 bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len); in bcm2835_spi_transfer_one_dma()
793 bs->tx_dma_active = true; in bcm2835_spi_transfer_one_dma()
797 dma_async_issue_pending(ctlr->dma_tx); in bcm2835_spi_transfer_one_dma()
799 /* set up rx-DMA late - to run transfers while in bcm2835_spi_transfer_one_dma()
800 * mapping of the rx buffers still takes place; in bcm2835_spi_transfer_one_dma()
801 * this saves 10us or more. in bcm2835_spi_transfer_one_dma()
803 if (bs->rx_buf) { in bcm2835_spi_transfer_one_dma()
804 ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, false); in bcm2835_spi_transfer_one_dma()
806 cookie = dmaengine_submit(bs->clear_rx_desc[spi->chip_select]); in bcm2835_spi_transfer_one_dma()
811 dmaengine_terminate_sync(ctlr->dma_tx); in bcm2835_spi_transfer_one_dma()
812 bs->tx_dma_active = false; in bcm2835_spi_transfer_one_dma()
816 /* start rx dma late */ in bcm2835_spi_transfer_one_dma()
817 dma_async_issue_pending(ctlr->dma_rx); in bcm2835_spi_transfer_one_dma()
818 bs->rx_dma_active = true; in bcm2835_spi_transfer_one_dma()
822 * In case of a very short TX-only transfer, bcm2835_spi_dma_tx_done() in bcm2835_spi_transfer_one_dma()
823 * may run before RX DMA is issued. Terminate RX DMA if so. in bcm2835_spi_transfer_one_dma()
825 if (!bs->rx_buf && !bs->tx_dma_active && in bcm2835_spi_transfer_one_dma()
826 cmpxchg(&bs->rx_dma_active, true, false)) { in bcm2835_spi_transfer_one_dma()
827 dmaengine_terminate_async(ctlr->dma_rx); in bcm2835_spi_transfer_one_dma()
841 struct spi_device *spi, in bcm2835_spi_can_dma() argument
845 if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH) in bcm2835_spi_can_dma()
857 if (ctlr->dma_tx) { in bcm2835_dma_release()
858 dmaengine_terminate_sync(ctlr->dma_tx); in bcm2835_dma_release()
860 if (bs->fill_tx_desc) in bcm2835_dma_release()
861 dmaengine_desc_free(bs->fill_tx_desc); in bcm2835_dma_release()
863 if (bs->fill_tx_addr) in bcm2835_dma_release()
864 dma_unmap_page_attrs(ctlr->dma_tx->device->dev, in bcm2835_dma_release()
865 bs->fill_tx_addr, sizeof(u32), in bcm2835_dma_release()
869 dma_release_channel(ctlr->dma_tx); in bcm2835_dma_release()
870 ctlr->dma_tx = NULL; in bcm2835_dma_release()
873 if (ctlr->dma_rx) { in bcm2835_dma_release()
874 dmaengine_terminate_sync(ctlr->dma_rx); in bcm2835_dma_release()
877 if (bs->clear_rx_desc[i]) in bcm2835_dma_release()
878 dmaengine_desc_free(bs->clear_rx_desc[i]); in bcm2835_dma_release()
880 if (bs->clear_rx_addr) in bcm2835_dma_release()
881 dma_unmap_single(ctlr->dma_rx->device->dev, in bcm2835_dma_release()
882 bs->clear_rx_addr, in bcm2835_dma_release()
883 sizeof(bs->clear_rx_cs), in bcm2835_dma_release()
886 dma_release_channel(ctlr->dma_rx); in bcm2835_dma_release()
887 ctlr->dma_rx = NULL; in bcm2835_dma_release()
899 /* base address in dma-space */ in bcm2835_dma_init()
900 addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL); in bcm2835_dma_init()
902 dev_err(dev, "could not get DMA-register address - not using dma mode\n"); in bcm2835_dma_init()
907 /* get tx/rx dma */ in bcm2835_dma_init()
908 ctlr->dma_tx = dma_request_slave_channel(dev, "tx"); in bcm2835_dma_init()
909 if (!ctlr->dma_tx) { in bcm2835_dma_init()
910 dev_err(dev, "no tx-dma configuration found - not using dma mode\n"); in bcm2835_dma_init()
913 ctlr->dma_rx = dma_request_slave_channel(dev, "rx"); in bcm2835_dma_init()
914 if (!ctlr->dma_rx) { in bcm2835_dma_init()
915 dev_err(dev, "no rx-dma configuration found - not using dma mode\n"); in bcm2835_dma_init()
921 * or, in case of an RX-only transfer, cyclically copies from the zero in bcm2835_dma_init()
927 ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config); in bcm2835_dma_init()
931 bs->fill_tx_addr = dma_map_page_attrs(ctlr->dma_tx->device->dev, in bcm2835_dma_init()
935 if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) { in bcm2835_dma_init()
936 dev_err(dev, "cannot map zero page - not using DMA mode\n"); in bcm2835_dma_init()
937 bs->fill_tx_addr = 0; in bcm2835_dma_init()
941 bs->fill_tx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_tx, in bcm2835_dma_init()
942 bs->fill_tx_addr, in bcm2835_dma_init()
945 if (!bs->fill_tx_desc) { in bcm2835_dma_init()
946 dev_err(dev, "cannot prepare fill_tx_desc - not using DMA mode\n"); in bcm2835_dma_init()
950 ret = dmaengine_desc_set_reuse(bs->fill_tx_desc); in bcm2835_dma_init()
952 dev_err(dev, "cannot reuse fill_tx_desc - not using DMA mode\n"); in bcm2835_dma_init()
957 * The RX DMA channel is used bidirectionally: It either reads the in bcm2835_dma_init()
958 * RX FIFO or, in case of a TX-only transfer, cyclically writes a in bcm2835_dma_init()
959 * precalculated value to the CS register to clear the RX FIFO. in bcm2835_dma_init()
966 ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config); in bcm2835_dma_init()
970 bs->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev, in bcm2835_dma_init()
971 bs->clear_rx_cs, in bcm2835_dma_init()
972 sizeof(bs->clear_rx_cs), in bcm2835_dma_init()
974 if (dma_mapping_error(ctlr->dma_rx->device->dev, bs->clear_rx_addr)) { in bcm2835_dma_init()
975 dev_err(dev, "cannot map clear_rx_cs - not using DMA mode\n"); in bcm2835_dma_init()
976 bs->clear_rx_addr = 0; in bcm2835_dma_init()
981 bs->clear_rx_desc[i] = dmaengine_prep_dma_cyclic(ctlr->dma_rx, in bcm2835_dma_init()
982 bs->clear_rx_addr + i * sizeof(u32), in bcm2835_dma_init()
985 if (!bs->clear_rx_desc[i]) { in bcm2835_dma_init()
986 dev_err(dev, "cannot prepare clear_rx_desc - not using DMA mode\n"); in bcm2835_dma_init()
990 ret = dmaengine_desc_set_reuse(bs->clear_rx_desc[i]); in bcm2835_dma_init()
992 dev_err(dev, "cannot reuse clear_rx_desc - not using DMA mode\n"); in bcm2835_dma_init()
998 ctlr->can_dma = bcm2835_spi_can_dma; in bcm2835_dma_init()
1003 dev_err(dev, "issue configuring dma: %d - not using DMA mode\n", in bcm2835_dma_init()
1012 struct spi_device *spi, in bcm2835_spi_transfer_one_poll() argument
1020 bs->count_transfer_polling++; in bcm2835_spi_transfer_one_poll()
1035 while (bs->rx_len) { in bcm2835_spi_transfer_one_poll()
1045 if (bs->rx_len && time_after(jiffies, timeout)) { in bcm2835_spi_transfer_one_poll()
1046 dev_dbg_ratelimited(&spi->dev, in bcm2835_spi_transfer_one_poll()
1047 … "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n", in bcm2835_spi_transfer_one_poll()
1048 jiffies - timeout, in bcm2835_spi_transfer_one_poll()
1049 bs->tx_len, bs->rx_len); in bcm2835_spi_transfer_one_poll()
1053 bs->count_transfer_irq_after_polling++; in bcm2835_spi_transfer_one_poll()
1055 return bcm2835_spi_transfer_one_irq(ctlr, spi, in bcm2835_spi_transfer_one_poll()
1060 /* Transfer complete - reset SPI HW */ in bcm2835_spi_transfer_one_poll()
1067 struct spi_device *spi, in bcm2835_spi_transfer_one() argument
1073 u32 cs = bs->prepare_cs[spi->chip_select]; in bcm2835_spi_transfer_one()
1076 spi_hz = tfr->speed_hz; in bcm2835_spi_transfer_one()
1077 clk_hz = clk_get_rate(bs->clk); in bcm2835_spi_transfer_one()
1094 /* handle all the 3-wire mode */ in bcm2835_spi_transfer_one()
1095 if (spi->mode & SPI_3WIRE && tfr->rx_buf) in bcm2835_spi_transfer_one()
1099 bs->tx_buf = tfr->tx_buf; in bcm2835_spi_transfer_one()
1100 bs->rx_buf = tfr->rx_buf; in bcm2835_spi_transfer_one()
1101 bs->tx_len = tfr->len; in bcm2835_spi_transfer_one()
1102 bs->rx_len = tfr->len; in bcm2835_spi_transfer_one()
1104 /* Calculate the estimated time in us the transfer runs. Note that in bcm2835_spi_transfer_one()
1107 * per byte per polling limit. E.g., we can transfer 1 byte in 30 us in bcm2835_spi_transfer_one()
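/*
 * Illustrative numbers, assuming the default 30 us polling limit: with
 * 8 data bits plus 1 idle clock cycle per byte, a 10 MHz clock moves one
 * byte in 0.9 us, so transfers of up to roughly 30 / 0.9 = 33 bytes are
 * polled while longer ones fall through to DMA or interrupt mode below.
 */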
1114 if (tfr->len < byte_limit) in bcm2835_spi_transfer_one()
1115 return bcm2835_spi_transfer_one_poll(ctlr, spi, tfr, cs); in bcm2835_spi_transfer_one()
1119 * this 1 idle clock cycle pattern but runs the spi clock without gaps in bcm2835_spi_transfer_one()
1121 if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr)) in bcm2835_spi_transfer_one()
1122 return bcm2835_spi_transfer_one_dma(ctlr, spi, tfr, cs); in bcm2835_spi_transfer_one()
1124 /* run in interrupt-mode */ in bcm2835_spi_transfer_one()
1125 return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true); in bcm2835_spi_transfer_one()
1131 struct spi_device *spi = msg->spi; in bcm2835_spi_prepare_message() local
1135 if (ctlr->can_dma) { in bcm2835_spi_prepare_message()
1138 * the SPI HW due to DLEN. Split up transfers (32-bit FIFO in bcm2835_spi_prepare_message()
1151 bcm2835_wr(bs, BCM2835_SPI_CS, bs->prepare_cs[spi->chip_select]); in bcm2835_spi_prepare_message()
1162 dmaengine_terminate_sync(ctlr->dma_tx); in bcm2835_spi_handle_err()
1163 bs->tx_dma_active = false; in bcm2835_spi_handle_err()
1164 dmaengine_terminate_sync(ctlr->dma_rx); in bcm2835_spi_handle_err()
1165 bs->rx_dma_active = false; in bcm2835_spi_handle_err()
1174 return !strcmp(chip->label, data); in chip_match_name()
1177 static int bcm2835_spi_setup(struct spi_device *spi) in bcm2835_spi_setup() argument
1179 struct spi_controller *ctlr = spi->controller; in bcm2835_spi_setup()
1186 * Precalculate SPI slave's CS register value for ->prepare_message(): in bcm2835_spi_setup()
1187 * The driver always uses software-controlled GPIO chip select, hence in bcm2835_spi_setup()
1188 * set the hardware-controlled native chip select to an invalid value in bcm2835_spi_setup()
1192 if (spi->mode & SPI_CPOL) in bcm2835_spi_setup()
1194 if (spi->mode & SPI_CPHA) in bcm2835_spi_setup()
1196 bs->prepare_cs[spi->chip_select] = cs; in bcm2835_spi_setup()
1199 * Precalculate SPI slave's CS register value to clear RX FIFO in bcm2835_spi_setup()
1200 * in case of a TX-only DMA transfer. in bcm2835_spi_setup()
1202 if (ctlr->dma_rx) { in bcm2835_spi_setup()
1203 bs->clear_rx_cs[spi->chip_select] = cs | in bcm2835_spi_setup()
1207 dma_sync_single_for_device(ctlr->dma_rx->device->dev, in bcm2835_spi_setup()
1208 bs->clear_rx_addr, in bcm2835_spi_setup()
1209 sizeof(bs->clear_rx_cs), in bcm2835_spi_setup()
1214 * sanity checking the native-chipselects in bcm2835_spi_setup()
1216 if (spi->mode & SPI_NO_CS) in bcm2835_spi_setup()
1219 * The SPI core has successfully requested the CS GPIO line from the in bcm2835_spi_setup()
1222 if (spi->cs_gpiod) in bcm2835_spi_setup()
1224 if (spi->chip_select > 1) { in bcm2835_spi_setup()
1229 dev_err(&spi->dev, in bcm2835_spi_setup()
1230 "setup: only two native chip-selects are supported\n"); in bcm2835_spi_setup()
1231 return -EINVAL; in bcm2835_spi_setup()
1239 * and fix it. Why is the GPIO descriptor in spi->cs_gpiod in bcm2835_spi_setup()
1244 chip = gpiochip_find("pinctrl-bcm2835", chip_match_name); in bcm2835_spi_setup()
1253 * on spi->mode cannot be checked for polarity in this case in bcm2835_spi_setup()
1256 if (of_property_read_bool(spi->dev.of_node, "spi-cs-high")) in bcm2835_spi_setup()
1260 spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select, in bcm2835_spi_setup()
1264 if (IS_ERR(spi->cs_gpiod)) in bcm2835_spi_setup()
1265 return PTR_ERR(spi->cs_gpiod); in bcm2835_spi_setup()
1268 dev_info(&spi->dev, "setting up native-CS%i to use GPIO\n", in bcm2835_spi_setup()
1269 spi->chip_select); in bcm2835_spi_setup()
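/*
 * Note on the 8 - chip_select mapping above: in the standard BCM2835
 * pinmux the controller's native CE0 line sits on GPIO 8 and CE1 on
 * GPIO 7, so native CS0/CS1 are re-requested as those two GPIOs.
 */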
1280 ctlr = spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs), in bcm2835_spi_probe()
1283 return -ENOMEM; in bcm2835_spi_probe()
1287 ctlr->use_gpio_descriptors = true; in bcm2835_spi_probe()
1288 ctlr->mode_bits = BCM2835_SPI_MODE_BITS; in bcm2835_spi_probe()
1289 ctlr->bits_per_word_mask = SPI_BPW_MASK(8); in bcm2835_spi_probe()
1290 ctlr->num_chipselect = BCM2835_SPI_NUM_CS; in bcm2835_spi_probe()
1291 ctlr->setup = bcm2835_spi_setup; in bcm2835_spi_probe()
1292 ctlr->transfer_one = bcm2835_spi_transfer_one; in bcm2835_spi_probe()
1293 ctlr->handle_err = bcm2835_spi_handle_err; in bcm2835_spi_probe()
1294 ctlr->prepare_message = bcm2835_spi_prepare_message; in bcm2835_spi_probe()
1295 ctlr->dev.of_node = pdev->dev.of_node; in bcm2835_spi_probe()
1299 bs->regs = devm_platform_ioremap_resource(pdev, 0); in bcm2835_spi_probe()
1300 if (IS_ERR(bs->regs)) { in bcm2835_spi_probe()
1301 err = PTR_ERR(bs->regs); in bcm2835_spi_probe()
1305 bs->clk = devm_clk_get(&pdev->dev, NULL); in bcm2835_spi_probe()
1306 if (IS_ERR(bs->clk)) { in bcm2835_spi_probe()
1307 err = PTR_ERR(bs->clk); in bcm2835_spi_probe()
1308 dev_err(&pdev->dev, "could not get clk: %d\n", err); in bcm2835_spi_probe()
1312 bs->irq = platform_get_irq(pdev, 0); in bcm2835_spi_probe()
1313 if (bs->irq <= 0) { in bcm2835_spi_probe()
1314 err = bs->irq ? bs->irq : -ENODEV; in bcm2835_spi_probe()
1318 clk_prepare_enable(bs->clk); in bcm2835_spi_probe()
1320 bcm2835_dma_init(ctlr, &pdev->dev, bs); in bcm2835_spi_probe()
1326 err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0, in bcm2835_spi_probe()
1327 dev_name(&pdev->dev), ctlr); in bcm2835_spi_probe()
1329 dev_err(&pdev->dev, "could not request IRQ: %d\n", err); in bcm2835_spi_probe()
1333 err = devm_spi_register_controller(&pdev->dev, ctlr); in bcm2835_spi_probe()
1335 dev_err(&pdev->dev, "could not register SPI controller: %d\n", in bcm2835_spi_probe()
1340 bcm2835_debugfs_create(bs, dev_name(&pdev->dev)); in bcm2835_spi_probe()
1345 clk_disable_unprepare(bs->clk); in bcm2835_spi_probe()
1362 clk_disable_unprepare(bs->clk); in bcm2835_spi_remove()
1370 { .compatible = "brcm,bcm2835-spi", },
1385 MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835");