Lines matching "jz4780-dma" (query tokens: +full:jz4780 +full:- +full:dma) in drivers/mmc/host/jz4740_mmc.c

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
13 #include <linux/dma-mapping.h>
19 #include <linux/mmc/slot-gpio.h>
130 * is in-flight. This is used via the pre_req/post_req hooks.
174 /* DMA support */
179 /* The DMA trigger level is 8 words, that is to say, the DMA read
180 * trigger is when data words in MSC_RXFIFO is >= 8 and the DMA write
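The 8-word trigger level above is also the natural value for the slave channel's DMA burst size, so the jz4780-dma engine only receives a request when a full burst fits in the FIFO. A minimal sketch of how such a threshold typically feeds struct dma_slave_config; the JZ4740_MMC_FIFO_HALF_SIZE define and the helper name are illustrative, not quoted from the matched lines:

#include <linux/dmaengine.h>

/* Sketch only: tie the MSC FIFO trigger level to the DMA burst size. */
#define JZ4740_MMC_FIFO_HALF_SIZE 8

static int example_config_msc_dma(struct dma_chan *chan,
				  dma_addr_t fifo_phys, bool write)
{
	struct dma_slave_config conf = {
		.direction	= write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= JZ4740_MMC_FIFO_HALF_SIZE,
		.dst_maxburst	= JZ4740_MMC_FIFO_HALF_SIZE,
	};

	if (write)
		conf.dst_addr = fifo_phys;	/* MSC TXFIFO */
	else
		conf.src_addr = fifo_phys;	/* MSC RXFIFO */

	return dmaengine_slave_config(chan, &conf);
}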
189 if (host->version >= JZ_MMC_JZ4725B) in jz4740_mmc_write_irq_mask()
190 return writel(val, host->base + JZ_REG_MMC_IMASK); in jz4740_mmc_write_irq_mask()
192 return writew(val, host->base + JZ_REG_MMC_IMASK); in jz4740_mmc_write_irq_mask()
198 if (host->version >= JZ_MMC_JZ4780) in jz4740_mmc_write_irq_reg()
199 writel(val, host->base + JZ_REG_MMC_IREG); in jz4740_mmc_write_irq_reg()
201 writew(val, host->base + JZ_REG_MMC_IREG); in jz4740_mmc_write_irq_reg()
206 if (host->version >= JZ_MMC_JZ4780) in jz4740_mmc_read_irq_reg()
207 return readl(host->base + JZ_REG_MMC_IREG); in jz4740_mmc_read_irq_reg()
209 return readw(host->base + JZ_REG_MMC_IREG); in jz4740_mmc_read_irq_reg()
212 /*----------------------------------------------------------------------------*/
213 /* DMA infrastructure */
217 if (!host->use_dma) in jz4740_mmc_release_dma_channels()
220 dma_release_channel(host->dma_tx); in jz4740_mmc_release_dma_channels()
221 dma_release_channel(host->dma_rx); in jz4740_mmc_release_dma_channels()
226 host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx"); in jz4740_mmc_acquire_dma_channels()
227 if (IS_ERR(host->dma_tx)) { in jz4740_mmc_acquire_dma_channels()
228 dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n"); in jz4740_mmc_acquire_dma_channels()
229 return PTR_ERR(host->dma_tx); in jz4740_mmc_acquire_dma_channels()
232 host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx"); in jz4740_mmc_acquire_dma_channels()
233 if (IS_ERR(host->dma_rx)) { in jz4740_mmc_acquire_dma_channels()
234 dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n"); in jz4740_mmc_acquire_dma_channels()
235 dma_release_channel(host->dma_tx); in jz4740_mmc_acquire_dma_channels()
236 return PTR_ERR(host->dma_rx); in jz4740_mmc_acquire_dma_channels()
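Both dma_request_chan() calls can return -EPROBE_DEFER if the DMA controller behind the "tx"/"rx" names (e.g. the jz4780-dma engine) has not probed yet; the probe code further down checks for that value explicitly. A hedged sketch of a deferral-aware variant using dev_err_probe(), which demotes -EPROBE_DEFER to a debug message and returns the error unchanged (helper name is hypothetical):

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_acquire_tx_chan(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan = dma_request_chan(dev, "tx");

	if (IS_ERR(chan))
		return dev_err_probe(dev, PTR_ERR(chan),
				     "Failed to get dma_tx channel\n");

	*out = chan;
	return 0;
}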
245 return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx; in jz4740_mmc_get_dma_chan()
254 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); in jz4740_mmc_dma_unmap()
255 data->host_cookie = COOKIE_UNMAPPED; in jz4740_mmc_dma_unmap()
258 /* Prepares DMA data for current or next transfer.
259 * A request can be in-flight when this is called.
269 if (data->host_cookie == COOKIE_PREMAPPED) in jz4740_mmc_prepare_dma_data()
270 return data->sg_count; in jz4740_mmc_prepare_dma_data()
272 sg_count = dma_map_sg(chan->device->dev, in jz4740_mmc_prepare_dma_data()
273 data->sg, in jz4740_mmc_prepare_dma_data()
274 data->sg_len, in jz4740_mmc_prepare_dma_data()
278 dev_err(mmc_dev(host->mmc), in jz4740_mmc_prepare_dma_data()
279 "Failed to map scatterlist for DMA operation\n"); in jz4740_mmc_prepare_dma_data()
280 return -EINVAL; in jz4740_mmc_prepare_dma_data()
283 data->sg_count = sg_count; in jz4740_mmc_prepare_dma_data()
284 data->host_cookie = cookie; in jz4740_mmc_prepare_dma_data()
286 return data->sg_count; in jz4740_mmc_prepare_dma_data()
302 if (data->flags & MMC_DATA_WRITE) { in jz4740_mmc_start_dma_transfer()
304 conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO; in jz4740_mmc_start_dma_transfer()
307 conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO; in jz4740_mmc_start_dma_transfer()
315 desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count, in jz4740_mmc_start_dma_transfer()
319 dev_err(mmc_dev(host->mmc), in jz4740_mmc_start_dma_transfer()
320 "Failed to allocate DMA %s descriptor", in jz4740_mmc_start_dma_transfer()
331 if (data->host_cookie == COOKIE_MAPPED) in jz4740_mmc_start_dma_transfer()
333 return -ENOMEM; in jz4740_mmc_start_dma_transfer()
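Only the error handling of jz4740_mmc_start_dma_transfer() matches the query; the surrounding configure/prep/submit/issue steps follow the standard dmaengine slave flow. A sketch of that flow under the same assumptions (conf and sg_count prepared as above, helper name illustrative):

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_issue_slave_sg(struct dma_chan *chan,
				  struct dma_slave_config *conf,
				  struct scatterlist *sg, int sg_count)
{
	struct dma_async_tx_descriptor *desc;
	int ret;

	/* Program FIFO address, bus width and burst size. */
	ret = dmaengine_slave_config(chan, conf);
	if (ret)
		return ret;

	/* Build a descriptor covering the mapped scatterlist. */
	desc = dmaengine_prep_slave_sg(chan, sg, sg_count, conf->direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	/* Queue it and kick the engine. */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}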
340 struct mmc_data *data = mrq->data; in jz4740_mmc_pre_request()
342 if (!host->use_dma) in jz4740_mmc_pre_request()
345 data->host_cookie = COOKIE_UNMAPPED; in jz4740_mmc_pre_request()
347 data->host_cookie = COOKIE_UNMAPPED; in jz4740_mmc_pre_request()
355 struct mmc_data *data = mrq->data; in jz4740_mmc_post_request()
357 if (data && data->host_cookie != COOKIE_UNMAPPED) in jz4740_mmc_post_request()
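pre_req()/post_req() only take effect once they are wired into the host's mmc_host_ops, which mmc->ops is pointed at in the probe fragment below. A sketch of that wiring; the jz4740_mmc_ops name matches the assignment in probe, but the exact member list here is a plausible reconstruction rather than a quote:

/* Requires <linux/mmc/host.h> and <linux/mmc/slot-gpio.h>. */
static const struct mmc_host_ops jz4740_mmc_ops = {
	.request	= jz4740_mmc_request,
	.pre_req	= jz4740_mmc_pre_request,
	.post_req	= jz4740_mmc_post_request,
	.set_ios	= jz4740_mmc_set_ios,
	.get_ro		= mmc_gpio_get_ro,	/* generic slot-gpio helpers */
	.get_cd		= mmc_gpio_get_cd,
};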
367 /*----------------------------------------------------------------------------*/
374 spin_lock_irqsave(&host->lock, flags); in jz4740_mmc_set_irq_enabled()
376 host->irq_mask &= ~irq; in jz4740_mmc_set_irq_enabled()
378 host->irq_mask |= irq; in jz4740_mmc_set_irq_enabled()
380 jz4740_mmc_write_irq_mask(host, host->irq_mask); in jz4740_mmc_set_irq_enabled()
381 spin_unlock_irqrestore(&host->lock, flags); in jz4740_mmc_set_irq_enabled()
392 writew(val, host->base + JZ_REG_MMC_STRPCL); in jz4740_mmc_clock_enable()
400 writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL); in jz4740_mmc_clock_disable()
402 status = readl(host->base + JZ_REG_MMC_STATUS); in jz4740_mmc_clock_disable()
403 } while (status & JZ_MMC_STATUS_CLK_EN && --timeout); in jz4740_mmc_clock_disable()
411 writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL); in jz4740_mmc_reset()
414 status = readl(host->base + JZ_REG_MMC_STATUS); in jz4740_mmc_reset()
415 } while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout); in jz4740_mmc_reset()
423 req = host->req; in jz4740_mmc_request_done()
424 data = req->data; in jz4740_mmc_request_done()
425 host->req = NULL; in jz4740_mmc_request_done()
427 if (data && data->host_cookie == COOKIE_MAPPED) in jz4740_mmc_request_done()
429 mmc_request_done(host->mmc, req); in jz4740_mmc_request_done()
440 } while (!(status & irq) && --timeout); in jz4740_mmc_poll_irq()
443 set_bit(0, &host->waiting); in jz4740_mmc_poll_irq()
444 mod_timer(&host->timeout_timer, in jz4740_mmc_poll_irq()
458 status = readl(host->base + JZ_REG_MMC_STATUS); in jz4740_mmc_transfer_check_state()
461 host->req->cmd->error = -ETIMEDOUT; in jz4740_mmc_transfer_check_state()
462 data->error = -ETIMEDOUT; in jz4740_mmc_transfer_check_state()
464 host->req->cmd->error = -EIO; in jz4740_mmc_transfer_check_state()
465 data->error = -EIO; in jz4740_mmc_transfer_check_state()
469 host->req->cmd->error = -ETIMEDOUT; in jz4740_mmc_transfer_check_state()
470 data->error = -ETIMEDOUT; in jz4740_mmc_transfer_check_state()
472 host->req->cmd->error = -EIO; in jz4740_mmc_transfer_check_state()
473 data->error = -EIO; in jz4740_mmc_transfer_check_state()
481 struct sg_mapping_iter *miter = &host->miter; in jz4740_mmc_write_data()
482 void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO; in jz4740_mmc_write_data()
488 buf = miter->addr; in jz4740_mmc_write_data()
489 i = miter->length / 4; in jz4740_mmc_write_data()
506 --j; in jz4740_mmc_write_data()
516 --i; in jz4740_mmc_write_data()
519 data->bytes_xfered += miter->length; in jz4740_mmc_write_data()
526 miter->consumed = (void *)buf - miter->addr; in jz4740_mmc_write_data()
527 data->bytes_xfered += miter->consumed; in jz4740_mmc_write_data()
536 struct sg_mapping_iter *miter = &host->miter; in jz4740_mmc_read_data()
537 void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO; in jz4740_mmc_read_data()
545 buf = miter->addr; in jz4740_mmc_read_data()
546 i = miter->length; in jz4740_mmc_read_data()
564 --j; in jz4740_mmc_read_data()
574 i -= 4; in jz4740_mmc_read_data()
581 data->bytes_xfered += miter->length; in jz4740_mmc_read_data()
585 flush_dcache_page(miter->page); in jz4740_mmc_read_data()
592 status = readl(host->base + JZ_REG_MMC_STATUS); in jz4740_mmc_read_data()
593 while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) { in jz4740_mmc_read_data()
595 status = readl(host->base + JZ_REG_MMC_STATUS); in jz4740_mmc_read_data()
601 miter->consumed = (void *)buf - miter->addr; in jz4740_mmc_read_data()
602 data->bytes_xfered += miter->consumed; in jz4740_mmc_read_data()
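The PIO helpers above appear only as scattered lines; they are built on the sg_mapping_iter started in jz_mmc_prepare_data_transfer() further down. A condensed sketch of the canonical write-side loop they follow (illustrative helper, not the driver verbatim; FIFO-full polling and sub-word tails are omitted):

#include <linux/io.h>
#include <linux/mmc/core.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

static void example_pio_write(struct sg_mapping_iter *miter,
			      void __iomem *fifo, struct mmc_data *data)
{
	/* Walk the scatterlist one mapped chunk at a time. */
	while (sg_miter_next(miter)) {
		uint32_t *buf = miter->addr;
		size_t words = miter->length / 4;

		/* Push whole 32-bit words into the TX FIFO. */
		while (words--)
			writel(*buf++, fifo);

		/* Account for what was consumed from this chunk. */
		miter->consumed = miter->length;
		data->bytes_xfered += miter->consumed;
	}
	sg_miter_stop(miter);
}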
612 if (!test_and_clear_bit(0, &host->waiting)) in jz4740_mmc_timeout()
617 host->req->cmd->error = -ETIMEDOUT; in jz4740_mmc_timeout()
626 void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO; in jz4740_mmc_read_response()
628 if (cmd->flags & MMC_RSP_136) { in jz4740_mmc_read_response()
631 cmd->resp[i] = tmp << 24; in jz4740_mmc_read_response()
633 cmd->resp[i] |= tmp << 8; in jz4740_mmc_read_response()
635 cmd->resp[i] |= tmp >> 8; in jz4740_mmc_read_response()
638 cmd->resp[0] = readw(fifo_addr) << 24; in jz4740_mmc_read_response()
639 cmd->resp[0] |= readw(fifo_addr) << 8; in jz4740_mmc_read_response()
640 cmd->resp[0] |= readw(fifo_addr) & 0xff; in jz4740_mmc_read_response()
647 uint32_t cmdat = host->cmdat; in jz4740_mmc_send_command()
649 host->cmdat &= ~JZ_MMC_CMDAT_INIT; in jz4740_mmc_send_command()
652 host->cmd = cmd; in jz4740_mmc_send_command()
654 if (cmd->flags & MMC_RSP_BUSY) in jz4740_mmc_send_command()
672 if (cmd->data) { in jz4740_mmc_send_command()
674 if (cmd->data->flags & MMC_DATA_WRITE) in jz4740_mmc_send_command()
676 if (host->use_dma) { in jz4740_mmc_send_command()
678 * The 4780's MMC controller has integrated DMA ability in jz4740_mmc_send_command()
679 * in addition to being able to use the external DMA in jz4740_mmc_send_command()
680 * controller. It moves DMA control bits to a separate in jz4740_mmc_send_command()
684 * single DMA enable bit in CMDAT. in jz4740_mmc_send_command()
686 if (host->version >= JZ_MMC_JZ4780) { in jz4740_mmc_send_command()
688 host->base + JZ_REG_MMC_DMAC); in jz4740_mmc_send_command()
692 } else if (host->version >= JZ_MMC_JZ4780) { in jz4740_mmc_send_command()
693 writel(0, host->base + JZ_REG_MMC_DMAC); in jz4740_mmc_send_command()
696 writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN); in jz4740_mmc_send_command()
697 writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB); in jz4740_mmc_send_command()
700 writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD); in jz4740_mmc_send_command()
701 writel(cmd->arg, host->base + JZ_REG_MMC_ARG); in jz4740_mmc_send_command()
702 writel(cmdat, host->base + JZ_REG_MMC_CMDAT); in jz4740_mmc_send_command()
709 struct mmc_command *cmd = host->req->cmd; in jz_mmc_prepare_data_transfer()
710 struct mmc_data *data = cmd->data; in jz_mmc_prepare_data_transfer()
713 if (data->flags & MMC_DATA_READ) in jz_mmc_prepare_data_transfer()
718 sg_miter_start(&host->miter, data->sg, data->sg_len, direction); in jz_mmc_prepare_data_transfer()
725 struct mmc_command *cmd = host->req->cmd; in jz_mmc_irq_worker()
726 struct mmc_request *req = host->req; in jz_mmc_irq_worker()
727 struct mmc_data *data = cmd->data; in jz_mmc_irq_worker()
730 if (cmd->error) in jz_mmc_irq_worker()
731 host->state = JZ4740_MMC_STATE_DONE; in jz_mmc_irq_worker()
733 switch (host->state) { in jz_mmc_irq_worker()
735 if (cmd->flags & MMC_RSP_PRESENT) in jz_mmc_irq_worker()
745 if (host->use_dma) { in jz_mmc_irq_worker()
746 /* Use DMA if enabled. in jz_mmc_irq_worker()
753 data->bytes_xfered = data->blocks * data->blksz; in jz_mmc_irq_worker()
754 } else if (data->flags & MMC_DATA_READ) in jz_mmc_irq_worker()
755 /* Use PIO if DMA is not enabled. in jz_mmc_irq_worker()
765 host->state = JZ4740_MMC_STATE_TRANSFER_DATA; in jz_mmc_irq_worker()
773 host->state = JZ4740_MMC_STATE_SEND_STOP; in jz_mmc_irq_worker()
780 if (!req->stop) in jz_mmc_irq_worker()
783 jz4740_mmc_send_command(host, req->stop); in jz_mmc_irq_worker()
785 if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) { in jz_mmc_irq_worker()
789 host->state = JZ4740_MMC_STATE_DONE; in jz_mmc_irq_worker()
806 struct mmc_command *cmd = host->cmd; in jz_mmc_irq()
809 status = readl(host->base + JZ_REG_MMC_STATUS); in jz_mmc_irq()
813 irq_reg &= ~host->irq_mask; in jz_mmc_irq()
823 mmc_signal_sdio_irq(host->mmc); in jz_mmc_irq()
827 if (host->req && cmd && irq_reg) { in jz_mmc_irq()
828 if (test_and_clear_bit(0, &host->waiting)) { in jz_mmc_irq()
829 del_timer(&host->timeout_timer); in jz_mmc_irq()
832 cmd->error = -ETIMEDOUT; in jz_mmc_irq()
834 cmd->error = -EIO; in jz_mmc_irq()
837 if (cmd->data) in jz_mmc_irq()
838 cmd->data->error = -EIO; in jz_mmc_irq()
839 cmd->error = -EIO; in jz_mmc_irq()
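The software timeout handling is spread across several fragments: jz4740_mmc_poll_irq() arms the timer and sets bit 0 of host->waiting, jz4740_mmc_timeout() bails out unless it can clear that bit, and the IRQ path above only proceeds (and deletes the timer) when it clears the bit first. A condensed sketch of that handshake, using the driver's field names but hypothetical helpers:

#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/types.h>

struct example_host {
	unsigned long waiting;
	struct timer_list timeout_timer;
};

/* Arm the software timeout before kicking off a command or transfer. */
static void example_arm_timeout(struct example_host *host,
				unsigned long expires)
{
	set_bit(0, &host->waiting);
	mod_timer(&host->timeout_timer, expires);
}

/*
 * Whichever of the IRQ handler and the timeout timer wins the
 * test_and_clear_bit() race is the only side allowed to complete the
 * request, so it can never be completed twice.
 */
static bool example_irq_claims_request(struct example_host *host)
{
	if (!test_and_clear_bit(0, &host->waiting))
		return false;	/* timer already fired and took over */
	del_timer(&host->timeout_timer);
	return true;
}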
858 clk_set_rate(host->clk, host->mmc->f_max); in jz4740_mmc_set_clock_rate()
860 real_rate = clk_get_rate(host->clk); in jz4740_mmc_set_clock_rate()
867 writew(div, host->base + JZ_REG_MMC_CLKRT); in jz4740_mmc_set_clock_rate()
870 if (host->version >= JZ_MMC_X1000) { in jz4740_mmc_set_clock_rate()
874 host->base + JZ_REG_MMC_LPM); in jz4740_mmc_set_clock_rate()
875 } else if (host->version >= JZ_MMC_JZ4760) { in jz4740_mmc_set_clock_rate()
878 host->base + JZ_REG_MMC_LPM); in jz4740_mmc_set_clock_rate()
879 } else if (host->version >= JZ_MMC_JZ4725B) in jz4740_mmc_set_clock_rate()
881 host->base + JZ_REG_MMC_LPM); in jz4740_mmc_set_clock_rate()
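The divider search between the clk_get_rate() call and the CLKRT write is not among the matched lines. It is a power-of-two divider, consistent with the probe fragment below setting mmc->f_min = mmc->f_max / 128 (a 3-bit field, 2^7 = 128). A sketch under that assumption, with an illustrative helper name:

/* f_out = real_rate >> div, div limited to the 3-bit CLKRT field (0..7). */
static unsigned int example_mmc_clkrt_div(unsigned long real_rate,
					  unsigned long target_rate)
{
	unsigned int div = 0;

	while (real_rate > target_rate && div < 7) {
		real_rate >>= 1;
		div++;
	}
	return div;
}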
891 host->req = req; in jz4740_mmc_request()
896 host->state = JZ4740_MMC_STATE_READ_RESPONSE; in jz4740_mmc_request()
897 set_bit(0, &host->waiting); in jz4740_mmc_request()
898 mod_timer(&host->timeout_timer, in jz4740_mmc_request()
900 jz4740_mmc_send_command(host, req->cmd); in jz4740_mmc_request()
906 if (ios->clock) in jz4740_mmc_set_ios()
907 jz4740_mmc_set_clock_rate(host, ios->clock); in jz4740_mmc_set_ios()
909 switch (ios->power_mode) { in jz4740_mmc_set_ios()
912 if (!IS_ERR(mmc->supply.vmmc)) in jz4740_mmc_set_ios()
913 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); in jz4740_mmc_set_ios()
914 host->cmdat |= JZ_MMC_CMDAT_INIT; in jz4740_mmc_set_ios()
915 clk_prepare_enable(host->clk); in jz4740_mmc_set_ios()
920 if (!IS_ERR(mmc->supply.vmmc)) in jz4740_mmc_set_ios()
921 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); in jz4740_mmc_set_ios()
922 clk_disable_unprepare(host->clk); in jz4740_mmc_set_ios()
926 switch (ios->bus_width) { in jz4740_mmc_set_ios()
928 host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK; in jz4740_mmc_set_ios()
931 host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK; in jz4740_mmc_set_ios()
932 host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT; in jz4740_mmc_set_ios()
935 host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK; in jz4740_mmc_set_ios()
936 host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT; in jz4740_mmc_set_ios()
960 	{ .compatible = "ingenic,jz4740-mmc", .data = (void *)JZ_MMC_JZ4740 },
961 	{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
962 	{ .compatible = "ingenic,jz4760-mmc", .data = (void *)JZ_MMC_JZ4760 },
963 	{ .compatible = "ingenic,jz4780-mmc", .data = (void *)JZ_MMC_JZ4780 },
964 	{ .compatible = "ingenic,x1000-mmc", .data = (void *)JZ_MMC_X1000 },
976 mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev); in jz4740_mmc_probe()
978 dev_err(&pdev->dev, "Failed to alloc mmc host structure\n"); in jz4740_mmc_probe()
979 return -ENOMEM; in jz4740_mmc_probe()
984 match = of_match_device(jz4740_mmc_of_match, &pdev->dev); in jz4740_mmc_probe()
986 host->version = (enum jz4740_mmc_version)match->data; in jz4740_mmc_probe()
989 host->version = JZ_MMC_JZ4740; in jz4740_mmc_probe()
994 dev_err_probe(&pdev->dev, ret, "could not parse device properties\n"); in jz4740_mmc_probe()
1000 host->irq = platform_get_irq(pdev, 0); in jz4740_mmc_probe()
1001 if (host->irq < 0) { in jz4740_mmc_probe()
1002 ret = host->irq; in jz4740_mmc_probe()
1006 host->clk = devm_clk_get(&pdev->dev, "mmc"); in jz4740_mmc_probe()
1007 if (IS_ERR(host->clk)) { in jz4740_mmc_probe()
1008 ret = PTR_ERR(host->clk); in jz4740_mmc_probe()
1009 dev_err(&pdev->dev, "Failed to get mmc clock\n"); in jz4740_mmc_probe()
1013 host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); in jz4740_mmc_probe()
1014 host->base = devm_ioremap_resource(&pdev->dev, host->mem_res); in jz4740_mmc_probe()
1015 if (IS_ERR(host->base)) { in jz4740_mmc_probe()
1016 ret = PTR_ERR(host->base); in jz4740_mmc_probe()
1017 dev_err(&pdev->dev, "Failed to ioremap base memory\n"); in jz4740_mmc_probe()
1021 mmc->ops = &jz4740_mmc_ops; in jz4740_mmc_probe()
1022 if (!mmc->f_max) in jz4740_mmc_probe()
1023 mmc->f_max = JZ_MMC_CLK_RATE; in jz4740_mmc_probe()
1024 mmc->f_min = mmc->f_max / 128; in jz4740_mmc_probe()
1025 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; in jz4740_mmc_probe()
1029 * future improvement should instead respect the cmd->busy_timeout. in jz4740_mmc_probe()
1031 mmc->max_busy_timeout = JZ_MMC_REQ_TIMEOUT_MS; in jz4740_mmc_probe()
1033 mmc->max_blk_size = (1 << 10) - 1; in jz4740_mmc_probe()
1034 mmc->max_blk_count = (1 << 15) - 1; in jz4740_mmc_probe()
1035 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; in jz4740_mmc_probe()
1037 mmc->max_segs = 128; in jz4740_mmc_probe()
1038 mmc->max_seg_size = mmc->max_req_size; in jz4740_mmc_probe()
1040 host->mmc = mmc; in jz4740_mmc_probe()
1041 host->pdev = pdev; in jz4740_mmc_probe()
1042 spin_lock_init(&host->lock); in jz4740_mmc_probe()
1043 host->irq_mask = ~0; in jz4740_mmc_probe()
1047 ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0, in jz4740_mmc_probe()
1048 dev_name(&pdev->dev), host); in jz4740_mmc_probe()
1050 dev_err(&pdev->dev, "Failed to request irq: %d\n", ret); in jz4740_mmc_probe()
1055 timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0); in jz4740_mmc_probe()
1058 if (ret == -EPROBE_DEFER) in jz4740_mmc_probe()
1060 host->use_dma = !ret; in jz4740_mmc_probe()
1066 dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret); in jz4740_mmc_probe()
1069 dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n"); in jz4740_mmc_probe()
1071 dev_info(&pdev->dev, "Using %s, %d-bit mode\n", in jz4740_mmc_probe()
1072 host->use_dma ? "DMA" : "PIO", in jz4740_mmc_probe()
1073 (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 : in jz4740_mmc_probe()
1074 ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1)); in jz4740_mmc_probe()
1079 if (host->use_dma) in jz4740_mmc_probe()
1082 free_irq(host->irq, host); in jz4740_mmc_probe()
1093 del_timer_sync(&host->timeout_timer); in jz4740_mmc_remove()
1097 mmc_remove_host(host->mmc); in jz4740_mmc_remove()
1099 free_irq(host->irq, host); in jz4740_mmc_remove()
1101 if (host->use_dma) in jz4740_mmc_remove()
1104 mmc_free_host(host->mmc); in jz4740_mmc_remove()
1126 .name = "jz4740-mmc",
1137 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");