Lines Matching +full:g +full:- +full:rx +full:- +full:fifo +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0-or-later
10 * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
11 * spi-atmel.c, Copyright (C) 2006 Atmel Corporation
18 #include <linux/dma-mapping.h>
74 #define DRV_NAME "spi-bcm2835"
83 * struct bcm2835_spi - BCM2835 SPI controller
87 * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
96 * @rx_prologue: bytes received without DMA if first RX sglist entry's
99 * @debugfs_dir: the debugfs directory - needed to remove debugfs when
111 * @rx_dma_active: whether a RX DMA descriptor is in progress
113 * @fill_tx_desc: preallocated TX DMA descriptor used for RX-only transfers
114 * (cyclically copies from zero page to TX FIFO)
146 * struct bcm2835_spidev - BCM2835 SPI slave
147 * @prepare_cs: precalculated CS register value for ->prepare_message()
148 * (uses slave-specific clock polarity and phase settings)
149 * @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
150 * (cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
152 * @clear_rx_cs: precalculated CS register value to clear RX FIFO
153 * (uses slave-specific clock polarity and phase settings)
170 snprintf(name, sizeof(name), "spi-bcm2835-%s", dname); in bcm2835_debugfs_create()
174 bs->debugfs_dir = dir; in bcm2835_debugfs_create()
178 &bs->count_transfer_polling); in bcm2835_debugfs_create()
180 &bs->count_transfer_irq); in bcm2835_debugfs_create()
182 &bs->count_transfer_irq_after_polling); in bcm2835_debugfs_create()
184 &bs->count_transfer_dma); in bcm2835_debugfs_create()
189 debugfs_remove_recursive(bs->debugfs_dir); in bcm2835_debugfs_remove()
190 bs->debugfs_dir = NULL; in bcm2835_debugfs_remove()
205 return readl(bs->regs + reg); in bcm2835_rd()
210 writel(val, bs->regs + reg); in bcm2835_wr()
217 while ((bs->rx_len) && in bcm2835_rd_fifo()
220 if (bs->rx_buf) in bcm2835_rd_fifo()
221 *bs->rx_buf++ = byte; in bcm2835_rd_fifo()
222 bs->rx_len--; in bcm2835_rd_fifo()
230 while ((bs->tx_len) && in bcm2835_wr_fifo()
232 byte = bs->tx_buf ? *bs->tx_buf++ : 0; in bcm2835_wr_fifo()
234 bs->tx_len--; in bcm2835_wr_fifo()
239 * bcm2835_rd_fifo_count() - blindly read exactly @count bytes from RX FIFO
241 * @count: bytes to read from RX FIFO
243 * The caller must ensure that @bs->rx_len is greater than or equal to @count,
244 * that the RX FIFO contains at least @count bytes and that the DMA Enable flag
245 * in the CS register is set (such that a read from the FIFO register receives
246 * 32-bit instead of just 8-bit). Moreover @bs->rx_buf must not be %NULL.
253 bs->rx_len -= count; in bcm2835_rd_fifo_count()
258 memcpy(bs->rx_buf, &val, len); in bcm2835_rd_fifo_count()
259 bs->rx_buf += len; in bcm2835_rd_fifo_count()
260 count -= 4; in bcm2835_rd_fifo_count()
265 * bcm2835_wr_fifo_count() - blindly write exactly @count bytes to TX FIFO
267 * @count: bytes to write to TX FIFO
269 * The caller must ensure that @bs->tx_len is greater than or equal to @count,
270 * that the TX FIFO can accommodate @count bytes and that the DMA Enable flag
271 * in the CS register is set (such that a write to the FIFO register transmits
272 * 32-bit instead of just 8-bit).
279 bs->tx_len -= count; in bcm2835_wr_fifo_count()
282 if (bs->tx_buf) { in bcm2835_wr_fifo_count()
284 memcpy(&val, bs->tx_buf, len); in bcm2835_wr_fifo_count()
285 bs->tx_buf += len; in bcm2835_wr_fifo_count()
290 count -= 4; in bcm2835_wr_fifo_count()
295 * bcm2835_wait_tx_fifo_empty() - busy-wait for TX FIFO to empty
298 * The caller must ensure that the RX FIFO can accommodate as many bytes
299 * as have been written to the TX FIFO: Transmission is halted once the
300 * RX FIFO is full, causing this function to spin forever.
309 * bcm2835_rd_fifo_blind() - blindly read up to @count bytes from RX FIFO
311 * @count: bytes available for reading in RX FIFO
317 count = min(count, bs->rx_len); in bcm2835_rd_fifo_blind()
318 bs->rx_len -= count; in bcm2835_rd_fifo_blind()
322 if (bs->rx_buf) in bcm2835_rd_fifo_blind()
323 *bs->rx_buf++ = val; in bcm2835_rd_fifo_blind()
324 } while (--count); in bcm2835_rd_fifo_blind()
328 * bcm2835_wr_fifo_blind() - blindly write up to @count bytes to TX FIFO
330 * @count: bytes available for writing in TX FIFO
336 count = min(count, bs->tx_len); in bcm2835_wr_fifo_blind()
337 bs->tx_len -= count; in bcm2835_wr_fifo_blind()
340 val = bs->tx_buf ? *bs->tx_buf++ : 0; in bcm2835_wr_fifo_blind()
342 } while (--count); in bcm2835_wr_fifo_blind()
361 /* and reset RX/TX FIFOS */ in bcm2835_spi_reset_hw()
380 * An interrupt is signaled either if DONE is set (TX FIFO empty) in bcm2835_spi_interrupt()
381 * or if RXR is set (RX FIFO >= ¾ full). in bcm2835_spi_interrupt()
388 if (bs->tx_len && cs & BCM2835_SPI_CS_DONE) in bcm2835_spi_interrupt()
391 /* Read as many bytes as possible from FIFO */ in bcm2835_spi_interrupt()
393 /* Write as many bytes as possible to FIFO */ in bcm2835_spi_interrupt()
396 if (!bs->rx_len) { in bcm2835_spi_interrupt()
397 /* Transfer complete - reset SPI HW */ in bcm2835_spi_interrupt()
400 spi_finalize_current_transfer(bs->ctlr); in bcm2835_spi_interrupt()
414 bs->count_transfer_irq++; in bcm2835_spi_transfer_one_irq()
418 * Otherwise the empty TX FIFO would immediately trigger an interrupt. in bcm2835_spi_transfer_one_irq()
422 /* fill TX FIFO as much as possible */ in bcm2835_spi_transfer_one_irq()
436 * bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
442 * A limitation in DMA mode is that the FIFO must be accessed in 4 byte chunks.
444 * SPI controller deduces its intended size from the DLEN register.
446 * If a TX or RX sglist contains multiple entries, one per page, and the first
458 * rounded up a to a multiple of 4 bytes by transmitting surplus bytes, an RX
462 * E.g. if the first TX sglist entry's length is 23 and the first RX's is 42,
463 * write 3 bytes to the TX FIFO but read only 2 bytes from the RX FIFO.
464 * The residue of 1 byte in the RX FIFO is picked up by DMA. Together with
465 * the rest of the first RX sglist entry it makes up a multiple of 4 bytes.
467 * Should the RX prologue be larger, say, 3 vis-à-vis a TX prologue of 1,
468 * write 1 + 4 = 5 bytes to the TX FIFO and read 3 bytes from the RX FIFO.
475 * The FIFO is normally accessed with 8-bit width by the CPU and 32-bit width
477 * the width but also garbles the FIFO's contents. The prologue must therefore
478 * be transmitted in 32-bit width to ensure that the following DMA transfer can
479 * pick up the residue in the RX FIFO in ungarbled form.
488 bs->tfr = tfr; in bcm2835_spi_transfer_prologue()
489 bs->tx_prologue = 0; in bcm2835_spi_transfer_prologue()
490 bs->rx_prologue = 0; in bcm2835_spi_transfer_prologue()
491 bs->tx_spillover = false; in bcm2835_spi_transfer_prologue()
493 if (bs->tx_buf && !sg_is_last(&tfr->tx_sg.sgl[0])) in bcm2835_spi_transfer_prologue()
494 bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3; in bcm2835_spi_transfer_prologue()
496 if (bs->rx_buf && !sg_is_last(&tfr->rx_sg.sgl[0])) { in bcm2835_spi_transfer_prologue()
497 bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3; in bcm2835_spi_transfer_prologue()
499 if (bs->rx_prologue > bs->tx_prologue) { in bcm2835_spi_transfer_prologue()
500 if (!bs->tx_buf || sg_is_last(&tfr->tx_sg.sgl[0])) { in bcm2835_spi_transfer_prologue()
501 bs->tx_prologue = bs->rx_prologue; in bcm2835_spi_transfer_prologue()
503 bs->tx_prologue += 4; in bcm2835_spi_transfer_prologue()
504 bs->tx_spillover = in bcm2835_spi_transfer_prologue()
505 !(sg_dma_len(&tfr->tx_sg.sgl[0]) & ~3); in bcm2835_spi_transfer_prologue()
511 if (!bs->tx_prologue) in bcm2835_spi_transfer_prologue()
514 /* Write and read RX prologue. Adjust first entry in RX sglist. */ in bcm2835_spi_transfer_prologue()
515 if (bs->rx_prologue) { in bcm2835_spi_transfer_prologue()
516 bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue); in bcm2835_spi_transfer_prologue()
519 bcm2835_wr_fifo_count(bs, bs->rx_prologue); in bcm2835_spi_transfer_prologue()
521 bcm2835_rd_fifo_count(bs, bs->rx_prologue); in bcm2835_spi_transfer_prologue()
526 dma_sync_single_for_device(ctlr->dma_rx->device->dev, in bcm2835_spi_transfer_prologue()
527 sg_dma_address(&tfr->rx_sg.sgl[0]), in bcm2835_spi_transfer_prologue()
528 bs->rx_prologue, DMA_FROM_DEVICE); in bcm2835_spi_transfer_prologue()
530 sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue; in bcm2835_spi_transfer_prologue()
531 sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue; in bcm2835_spi_transfer_prologue()
534 if (!bs->tx_buf) in bcm2835_spi_transfer_prologue()
541 tx_remaining = bs->tx_prologue - bs->rx_prologue; in bcm2835_spi_transfer_prologue()
552 if (likely(!bs->tx_spillover)) { in bcm2835_spi_transfer_prologue()
553 sg_dma_address(&tfr->tx_sg.sgl[0]) += bs->tx_prologue; in bcm2835_spi_transfer_prologue()
554 sg_dma_len(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue; in bcm2835_spi_transfer_prologue()
556 sg_dma_len(&tfr->tx_sg.sgl[0]) = 0; in bcm2835_spi_transfer_prologue()
557 sg_dma_address(&tfr->tx_sg.sgl[1]) += 4; in bcm2835_spi_transfer_prologue()
558 sg_dma_len(&tfr->tx_sg.sgl[1]) -= 4; in bcm2835_spi_transfer_prologue()
563 * bcm2835_spi_undo_prologue() - reconstruct original sglist state
572 struct spi_transfer *tfr = bs->tfr; in bcm2835_spi_undo_prologue()
574 if (!bs->tx_prologue) in bcm2835_spi_undo_prologue()
577 if (bs->rx_prologue) { in bcm2835_spi_undo_prologue()
578 sg_dma_address(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue; in bcm2835_spi_undo_prologue()
579 sg_dma_len(&tfr->rx_sg.sgl[0]) += bs->rx_prologue; in bcm2835_spi_undo_prologue()
582 if (!bs->tx_buf) in bcm2835_spi_undo_prologue()
585 if (likely(!bs->tx_spillover)) { in bcm2835_spi_undo_prologue()
586 sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue; in bcm2835_spi_undo_prologue()
587 sg_dma_len(&tfr->tx_sg.sgl[0]) += bs->tx_prologue; in bcm2835_spi_undo_prologue()
589 sg_dma_len(&tfr->tx_sg.sgl[0]) = bs->tx_prologue - 4; in bcm2835_spi_undo_prologue()
590 sg_dma_address(&tfr->tx_sg.sgl[1]) -= 4; in bcm2835_spi_undo_prologue()
591 sg_dma_len(&tfr->tx_sg.sgl[1]) += 4; in bcm2835_spi_undo_prologue()
594 bs->tx_prologue = 0; in bcm2835_spi_undo_prologue()
598 * bcm2835_spi_dma_rx_done() - callback for DMA RX channel
601 * Used for bidirectional and RX-only transfers.
608 /* terminate tx-dma as we do not have an irq for it in bcm2835_spi_dma_rx_done()
609 * because when the rx dma will terminate and this callback in bcm2835_spi_dma_rx_done()
610 * is called the tx-dma must have finished - can't get to this in bcm2835_spi_dma_rx_done()
613 dmaengine_terminate_async(ctlr->dma_tx); in bcm2835_spi_dma_rx_done()
614 bs->tx_dma_active = false; in bcm2835_spi_dma_rx_done()
615 bs->rx_dma_active = false; in bcm2835_spi_dma_rx_done()
618 /* reset fifo and HW */ in bcm2835_spi_dma_rx_done()
626 * bcm2835_spi_dma_tx_done() - callback for DMA TX channel
629 * Used for TX-only transfers.
636 /* busy-wait for TX FIFO to empty */ in bcm2835_spi_dma_tx_done()
638 bcm2835_wr(bs, BCM2835_SPI_CS, bs->slv->clear_rx_cs); in bcm2835_spi_dma_tx_done()
640 bs->tx_dma_active = false; in bcm2835_spi_dma_tx_done()
644 * In case of a very short transfer, RX DMA may not have been in bcm2835_spi_dma_tx_done()
648 if (cmpxchg(&bs->rx_dma_active, true, false)) in bcm2835_spi_dma_tx_done()
649 dmaengine_terminate_async(ctlr->dma_rx); in bcm2835_spi_dma_tx_done()
657 * bcm2835_spi_prepare_sg() - prepare and submit DMA descriptor for sglist
662 * @is_tx: whether to submit DMA descriptor for TX or RX sglist
664 * Prepare and submit a DMA descriptor for the TX or RX sglist of @tfr.
684 chan = ctlr->dma_tx; in bcm2835_spi_prepare_sg()
685 nents = tfr->tx_sg.nents; in bcm2835_spi_prepare_sg()
686 sgl = tfr->tx_sg.sgl; in bcm2835_spi_prepare_sg()
687 flags = tfr->rx_buf ? 0 : DMA_PREP_INTERRUPT; in bcm2835_spi_prepare_sg()
690 chan = ctlr->dma_rx; in bcm2835_spi_prepare_sg()
691 nents = tfr->rx_sg.nents; in bcm2835_spi_prepare_sg()
692 sgl = tfr->rx_sg.sgl; in bcm2835_spi_prepare_sg()
698 return -EINVAL; in bcm2835_spi_prepare_sg()
701 * Completion is signaled by the RX channel for bidirectional and in bcm2835_spi_prepare_sg()
702 * RX-only transfers; else by the TX channel for TX-only transfers. in bcm2835_spi_prepare_sg()
705 desc->callback = bcm2835_spi_dma_rx_done; in bcm2835_spi_prepare_sg()
706 desc->callback_param = ctlr; in bcm2835_spi_prepare_sg()
707 } else if (!tfr->rx_buf) { in bcm2835_spi_prepare_sg()
708 desc->callback = bcm2835_spi_dma_tx_done; in bcm2835_spi_prepare_sg()
709 desc->callback_param = ctlr; in bcm2835_spi_prepare_sg()
710 bs->slv = slv; in bcm2835_spi_prepare_sg()
713 /* submit it to DMA-engine */ in bcm2835_spi_prepare_sg()
720 * bcm2835_spi_transfer_one_dma() - perform SPI transfer using DMA engine
726 * For *bidirectional* transfers (both tx_buf and rx_buf are non-%NULL), set up
727 * the TX and RX DMA channel to copy between memory and FIFO register.
729 * For *TX-only* transfers (rx_buf is %NULL), copying the RX FIFO's contents to
730 * memory is pointless. However not reading the RX FIFO isn't an option either
732 * clear the RX FIFO by setting the CLEAR_RX bit in the CS register.
737 * when performing a TX-only transfer is to submit this descriptor to the RX
742 * Clearing the RX FIFO is paced by the DREQ signal. The signal is asserted
743 * when the RX FIFO becomes half full, i.e. 32 bytes. (Tuneable with the DC
744 * register.) Reading 32 bytes from the RX FIFO would normally require 8 bus
745 * accesses, whereas clearing it requires only 1 bus access. So an 8-fold
748 * For *RX-only* transfers (tx_buf is %NULL), fill the TX FIFO by cyclically
750 * in bcm2835_dma_init(). It must be terminated once the RX DMA channel is
756 * feature is not available on so-called "lite" channels, but normally TX DMA
757 * is backed by a full-featured channel.
759 * Zero-filling the TX FIFO is paced by the DREQ signal. Unfortunately the
762 * has finished, the DMA engine zero-fills the TX FIFO until it is half full.
764 * performed at the end of an RX-only transfer.
776 bs->count_transfer_dma++; in bcm2835_spi_transfer_one_dma()
779 * Transfer first few bytes without DMA if length of first TX or RX in bcm2835_spi_transfer_one_dma()
784 /* setup tx-DMA */ in bcm2835_spi_transfer_one_dma()
785 if (bs->tx_buf) { in bcm2835_spi_transfer_one_dma()
788 cookie = dmaengine_submit(bs->fill_tx_desc); in bcm2835_spi_transfer_one_dma()
795 bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len); in bcm2835_spi_transfer_one_dma()
801 bs->tx_dma_active = true; in bcm2835_spi_transfer_one_dma()
805 dma_async_issue_pending(ctlr->dma_tx); in bcm2835_spi_transfer_one_dma()
807 /* setup rx-DMA late - to run transfers while in bcm2835_spi_transfer_one_dma()
808 * mapping of the rx buffers still takes place in bcm2835_spi_transfer_one_dma()
811 if (bs->rx_buf) { in bcm2835_spi_transfer_one_dma()
814 cookie = dmaengine_submit(slv->clear_rx_desc); in bcm2835_spi_transfer_one_dma()
819 dmaengine_terminate_sync(ctlr->dma_tx); in bcm2835_spi_transfer_one_dma()
820 bs->tx_dma_active = false; in bcm2835_spi_transfer_one_dma()
824 /* start rx dma late */ in bcm2835_spi_transfer_one_dma()
825 dma_async_issue_pending(ctlr->dma_rx); in bcm2835_spi_transfer_one_dma()
826 bs->rx_dma_active = true; in bcm2835_spi_transfer_one_dma()
830 * In case of a very short TX-only transfer, bcm2835_spi_dma_tx_done() in bcm2835_spi_transfer_one_dma()
831 * may run before RX DMA is issued. Terminate RX DMA if so. in bcm2835_spi_transfer_one_dma()
833 if (!bs->rx_buf && !bs->tx_dma_active && in bcm2835_spi_transfer_one_dma()
834 cmpxchg(&bs->rx_dma_active, true, false)) { in bcm2835_spi_transfer_one_dma()
835 dmaengine_terminate_async(ctlr->dma_rx); in bcm2835_spi_transfer_one_dma()
853 if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH) in bcm2835_spi_can_dma()
863 if (ctlr->dma_tx) { in bcm2835_dma_release()
864 dmaengine_terminate_sync(ctlr->dma_tx); in bcm2835_dma_release()
866 if (bs->fill_tx_desc) in bcm2835_dma_release()
867 dmaengine_desc_free(bs->fill_tx_desc); in bcm2835_dma_release()
869 if (bs->fill_tx_addr) in bcm2835_dma_release()
870 dma_unmap_page_attrs(ctlr->dma_tx->device->dev, in bcm2835_dma_release()
871 bs->fill_tx_addr, sizeof(u32), in bcm2835_dma_release()
875 dma_release_channel(ctlr->dma_tx); in bcm2835_dma_release()
876 ctlr->dma_tx = NULL; in bcm2835_dma_release()
879 if (ctlr->dma_rx) { in bcm2835_dma_release()
880 dmaengine_terminate_sync(ctlr->dma_rx); in bcm2835_dma_release()
881 dma_release_channel(ctlr->dma_rx); in bcm2835_dma_release()
882 ctlr->dma_rx = NULL; in bcm2835_dma_release()
894 /* base address in dma-space */ in bcm2835_dma_init()
895 addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL); in bcm2835_dma_init()
897 dev_err(dev, "could not get DMA-register address - not using dma mode\n"); in bcm2835_dma_init()
903 /* get tx/rx dma */ in bcm2835_dma_init()
904 ctlr->dma_tx = dma_request_chan(dev, "tx"); in bcm2835_dma_init()
905 if (IS_ERR(ctlr->dma_tx)) { in bcm2835_dma_init()
906 dev_err(dev, "no tx-dma configuration found - not using dma mode\n"); in bcm2835_dma_init()
907 ret = PTR_ERR(ctlr->dma_tx); in bcm2835_dma_init()
908 ctlr->dma_tx = NULL; in bcm2835_dma_init()
911 ctlr->dma_rx = dma_request_chan(dev, "rx"); in bcm2835_dma_init()
912 if (IS_ERR(ctlr->dma_rx)) { in bcm2835_dma_init()
913 dev_err(dev, "no rx-dma configuration found - not using dma mode\n"); in bcm2835_dma_init()
914 ret = PTR_ERR(ctlr->dma_rx); in bcm2835_dma_init()
915 ctlr->dma_rx = NULL; in bcm2835_dma_init()
920 * The TX DMA channel either copies a transfer's TX buffer to the FIFO in bcm2835_dma_init()
921 * or, in case of an RX-only transfer, cyclically copies from the zero in bcm2835_dma_init()
922 * page to the FIFO using a preallocated, reusable descriptor. in bcm2835_dma_init()
927 ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config); in bcm2835_dma_init()
931 bs->fill_tx_addr = dma_map_page_attrs(ctlr->dma_tx->device->dev, in bcm2835_dma_init()
935 if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) { in bcm2835_dma_init()
936 dev_err(dev, "cannot map zero page - not using DMA mode\n"); in bcm2835_dma_init()
937 bs->fill_tx_addr = 0; in bcm2835_dma_init()
938 ret = -ENOMEM; in bcm2835_dma_init()
942 bs->fill_tx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_tx, in bcm2835_dma_init()
943 bs->fill_tx_addr, in bcm2835_dma_init()
946 if (!bs->fill_tx_desc) { in bcm2835_dma_init()
947 dev_err(dev, "cannot prepare fill_tx_desc - not using DMA mode\n"); in bcm2835_dma_init()
948 ret = -ENOMEM; in bcm2835_dma_init()
952 ret = dmaengine_desc_set_reuse(bs->fill_tx_desc); in bcm2835_dma_init()
954 dev_err(dev, "cannot reuse fill_tx_desc - not using DMA mode\n"); in bcm2835_dma_init()
959 * The RX DMA channel is used bidirectionally: It either reads the in bcm2835_dma_init()
960 * RX FIFO or, in case of a TX-only transfer, cyclically writes a in bcm2835_dma_init()
961 * precalculated value to the CS register to clear the RX FIFO. in bcm2835_dma_init()
968 ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config); in bcm2835_dma_init()
973 ctlr->can_dma = bcm2835_spi_can_dma; in bcm2835_dma_init()
978 dev_err(dev, "issue configuring dma: %d - not using DMA mode\n", in bcm2835_dma_init()
987 if (ret != -EPROBE_DEFER) in bcm2835_dma_init()
1002 bs->count_transfer_polling++; in bcm2835_spi_transfer_one_poll()
1007 /* fill in the fifo before timeout calculations in bcm2835_spi_transfer_one_poll()
1017 while (bs->rx_len) { in bcm2835_spi_transfer_one_poll()
1018 /* fill in tx fifo with remaining data */ in bcm2835_spi_transfer_one_poll()
1021 /* read from fifo as much as possible */ in bcm2835_spi_transfer_one_poll()
1027 if (bs->rx_len && time_after(jiffies, timeout)) { in bcm2835_spi_transfer_one_poll()
1028 dev_dbg_ratelimited(&spi->dev, in bcm2835_spi_transfer_one_poll()
1029 … "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n", in bcm2835_spi_transfer_one_poll()
1030 jiffies - timeout, in bcm2835_spi_transfer_one_poll()
1031 bs->tx_len, bs->rx_len); in bcm2835_spi_transfer_one_poll()
1035 bs->count_transfer_irq_after_polling++; in bcm2835_spi_transfer_one_poll()
1042 /* Transfer complete - reset SPI HW */ in bcm2835_spi_transfer_one_poll()
1056 u32 cs = slv->prepare_cs; in bcm2835_spi_transfer_one()
1059 spi_hz = tfr->speed_hz; in bcm2835_spi_transfer_one()
1061 if (spi_hz >= bs->clk_hz / 2) { in bcm2835_spi_transfer_one()
1065 cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz); in bcm2835_spi_transfer_one()
1073 tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536); in bcm2835_spi_transfer_one()
1076 /* handle all the 3-wire mode */ in bcm2835_spi_transfer_one()
1077 if (spi->mode & SPI_3WIRE && tfr->rx_buf) in bcm2835_spi_transfer_one()
1081 bs->tx_buf = tfr->tx_buf; in bcm2835_spi_transfer_one()
1082 bs->rx_buf = tfr->rx_buf; in bcm2835_spi_transfer_one()
1083 bs->tx_len = tfr->len; in bcm2835_spi_transfer_one()
1084 bs->rx_len = tfr->len; in bcm2835_spi_transfer_one()
1089 * per byte per polling limit. E.g., we can transfer 1 byte in 30 us in bcm2835_spi_transfer_one()
1093 byte_limit = hz_per_byte ? tfr->effective_speed_hz / hz_per_byte : 1; in bcm2835_spi_transfer_one()
1096 if (tfr->len < byte_limit) in bcm2835_spi_transfer_one()
1103 if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr)) in bcm2835_spi_transfer_one()
1106 /* run in interrupt-mode */ in bcm2835_spi_transfer_one()
1113 struct spi_device *spi = msg->spi; in bcm2835_spi_prepare_message()
1118 if (ctlr->can_dma) { in bcm2835_spi_prepare_message()
1121 * the SPI HW due to DLEN. Split up transfers (32-bit FIFO in bcm2835_spi_prepare_message()
1134 bcm2835_wr(bs, BCM2835_SPI_CS, slv->prepare_cs); in bcm2835_spi_prepare_message()
1145 if (ctlr->dma_tx) { in bcm2835_spi_handle_err()
1146 dmaengine_terminate_sync(ctlr->dma_tx); in bcm2835_spi_handle_err()
1147 bs->tx_dma_active = false; in bcm2835_spi_handle_err()
1149 if (ctlr->dma_rx) { in bcm2835_spi_handle_err()
1150 dmaengine_terminate_sync(ctlr->dma_rx); in bcm2835_spi_handle_err()
1151 bs->rx_dma_active = false; in bcm2835_spi_handle_err()
1161 return !strcmp(chip->label, data); in chip_match_name()
1167 struct spi_controller *ctlr = spi->controller; in bcm2835_spi_cleanup()
1169 if (slv->clear_rx_desc) in bcm2835_spi_cleanup()
1170 dmaengine_desc_free(slv->clear_rx_desc); in bcm2835_spi_cleanup()
1172 if (slv->clear_rx_addr) in bcm2835_spi_cleanup()
1173 dma_unmap_single(ctlr->dma_rx->device->dev, in bcm2835_spi_cleanup()
1174 slv->clear_rx_addr, in bcm2835_spi_cleanup()
1188 if (!ctlr->dma_rx) in bcm2835_spi_setup_dma()
1191 slv->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev, in bcm2835_spi_setup_dma()
1192 &slv->clear_rx_cs, in bcm2835_spi_setup_dma()
1195 if (dma_mapping_error(ctlr->dma_rx->device->dev, slv->clear_rx_addr)) { in bcm2835_spi_setup_dma()
1196 dev_err(&spi->dev, "cannot map clear_rx_cs\n"); in bcm2835_spi_setup_dma()
1197 slv->clear_rx_addr = 0; in bcm2835_spi_setup_dma()
1198 return -ENOMEM; in bcm2835_spi_setup_dma()
1201 slv->clear_rx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_rx, in bcm2835_spi_setup_dma()
1202 slv->clear_rx_addr, in bcm2835_spi_setup_dma()
1205 if (!slv->clear_rx_desc) { in bcm2835_spi_setup_dma()
1206 dev_err(&spi->dev, "cannot prepare clear_rx_desc\n"); in bcm2835_spi_setup_dma()
1207 return -ENOMEM; in bcm2835_spi_setup_dma()
1210 ret = dmaengine_desc_set_reuse(slv->clear_rx_desc); in bcm2835_spi_setup_dma()
1212 dev_err(&spi->dev, "cannot reuse clear_rx_desc\n"); in bcm2835_spi_setup_dma()
1221 struct spi_controller *ctlr = spi->controller; in bcm2835_spi_setup()
1232 return -ENOMEM; in bcm2835_spi_setup()
1242 * Precalculate SPI slave's CS register value for ->prepare_message(): in bcm2835_spi_setup()
1243 * The driver always uses software-controlled GPIO chip select, hence in bcm2835_spi_setup()
1244 * set the hardware-controlled native chip select to an invalid value in bcm2835_spi_setup()
1248 if (spi->mode & SPI_CPOL) in bcm2835_spi_setup()
1250 if (spi->mode & SPI_CPHA) in bcm2835_spi_setup()
1252 slv->prepare_cs = cs; in bcm2835_spi_setup()
1255 * Precalculate SPI slave's CS register value to clear RX FIFO in bcm2835_spi_setup()
1256 * in case of a TX-only DMA transfer. in bcm2835_spi_setup()
1258 if (ctlr->dma_rx) { in bcm2835_spi_setup()
1259 slv->clear_rx_cs = cs | BCM2835_SPI_CS_TA | in bcm2835_spi_setup()
1262 dma_sync_single_for_device(ctlr->dma_rx->device->dev, in bcm2835_spi_setup()
1263 slv->clear_rx_addr, in bcm2835_spi_setup()
1269 * sanity checking the native-chipselects in bcm2835_spi_setup()
1271 if (spi->mode & SPI_NO_CS) in bcm2835_spi_setup()
1277 if (spi->cs_gpiod) in bcm2835_spi_setup()
1279 if (spi->chip_select > 1) { in bcm2835_spi_setup()
1284 dev_err(&spi->dev, in bcm2835_spi_setup()
1285 "setup: only two native chip-selects are supported\n"); in bcm2835_spi_setup()
1286 ret = -EINVAL; in bcm2835_spi_setup()
1295 * and fix it. Why is the GPIO descriptor in spi->cs_gpiod in bcm2835_spi_setup()
1300 chip = gpiochip_find("pinctrl-bcm2835", chip_match_name); in bcm2835_spi_setup()
1304 spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select, in bcm2835_spi_setup()
1308 if (IS_ERR(spi->cs_gpiod)) { in bcm2835_spi_setup()
1309 ret = PTR_ERR(spi->cs_gpiod); in bcm2835_spi_setup()
1314 dev_info(&spi->dev, "setting up native-CS%i to use GPIO\n", in bcm2835_spi_setup()
1315 spi->chip_select); in bcm2835_spi_setup()
1330 ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*bs)); in bcm2835_spi_probe()
1332 return -ENOMEM; in bcm2835_spi_probe()
1336 ctlr->use_gpio_descriptors = true; in bcm2835_spi_probe()
1337 ctlr->mode_bits = BCM2835_SPI_MODE_BITS; in bcm2835_spi_probe()
1338 ctlr->bits_per_word_mask = SPI_BPW_MASK(8); in bcm2835_spi_probe()
1339 ctlr->num_chipselect = 3; in bcm2835_spi_probe()
1340 ctlr->setup = bcm2835_spi_setup; in bcm2835_spi_probe()
1341 ctlr->cleanup = bcm2835_spi_cleanup; in bcm2835_spi_probe()
1342 ctlr->transfer_one = bcm2835_spi_transfer_one; in bcm2835_spi_probe()
1343 ctlr->handle_err = bcm2835_spi_handle_err; in bcm2835_spi_probe()
1344 ctlr->prepare_message = bcm2835_spi_prepare_message; in bcm2835_spi_probe()
1345 ctlr->dev.of_node = pdev->dev.of_node; in bcm2835_spi_probe()
1348 bs->ctlr = ctlr; in bcm2835_spi_probe()
1350 bs->regs = devm_platform_ioremap_resource(pdev, 0); in bcm2835_spi_probe()
1351 if (IS_ERR(bs->regs)) in bcm2835_spi_probe()
1352 return PTR_ERR(bs->regs); in bcm2835_spi_probe()
1354 bs->clk = devm_clk_get(&pdev->dev, NULL); in bcm2835_spi_probe()
1355 if (IS_ERR(bs->clk)) in bcm2835_spi_probe()
1356 return dev_err_probe(&pdev->dev, PTR_ERR(bs->clk), in bcm2835_spi_probe()
1359 ctlr->max_speed_hz = clk_get_rate(bs->clk) / 2; in bcm2835_spi_probe()
1361 bs->irq = platform_get_irq(pdev, 0); in bcm2835_spi_probe()
1362 if (bs->irq <= 0) in bcm2835_spi_probe()
1363 return bs->irq ? bs->irq : -ENODEV; in bcm2835_spi_probe()
1365 clk_prepare_enable(bs->clk); in bcm2835_spi_probe()
1366 bs->clk_hz = clk_get_rate(bs->clk); in bcm2835_spi_probe()
1368 err = bcm2835_dma_init(ctlr, &pdev->dev, bs); in bcm2835_spi_probe()
1376 err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, in bcm2835_spi_probe()
1377 IRQF_SHARED, dev_name(&pdev->dev), bs); in bcm2835_spi_probe()
1379 dev_err(&pdev->dev, "could not request IRQ: %d\n", err); in bcm2835_spi_probe()
1385 dev_err(&pdev->dev, "could not register SPI controller: %d\n", in bcm2835_spi_probe()
1390 bcm2835_debugfs_create(bs, dev_name(&pdev->dev)); in bcm2835_spi_probe()
1397 clk_disable_unprepare(bs->clk); in bcm2835_spi_probe()
1416 clk_disable_unprepare(bs->clk); in bcm2835_spi_remove()
1427 dev_err(&pdev->dev, "failed to shutdown\n"); in bcm2835_spi_shutdown()
1431 { .compatible = "brcm,bcm2835-spi", },