Lines Matching +full:cmd +full:- +full:crci

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
10 #include <linux/dma-mapping.h>
27 #include "../virt-dma.h"
29 /* ADM registers - calculated from channel number and security domain */
48 #define ADM_CRCI_CTL(crci, ee) (0x400 + (crci) * ADM_CRCI_MULTI + \ argument
71 /* CRCI CTL */
99 #define ADM_MAX_XFER (SZ_64K - 1)
100 #define ADM_MAX_ROWS (SZ_64K - 1)
104 u32 cmd; member
113 u32 cmd; member
130 u32 crci; member
144 u32 crci; member
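
For orientation, the cmd/crci members matched above belong to the driver's hardware-descriptor and transaction structs. A minimal sketch of the two descriptor layouts, reconstructed from the field accesses later in this listing (the exact field order is an assumption):

    #include <linux/types.h>

    /* Reconstructed from the accesses in adm_process_*_descriptors();
     * field order is an assumption, names follow the driver. */
    struct adm_desc_hw_single {
            u32 cmd;        /* ADM_CMD_TYPE_SINGLE | CRCI bits | ADM_CMD_LC */
            u32 src_addr;
            u32 dst_addr;
            u32 len;
    };

    struct adm_desc_hw_box {
            u32 cmd;        /* ADM_CMD_TYPE_BOX | CRCI bits | ADM_CMD_LC */
            u32 src_addr;
            u32 dst_addr;
            u32 row_len;    /* dst len << 16 | src len */
            u32 num_rows;   /* dst rows << 16 | src rows */
            u32 row_offset;
    };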
177 * adm_free_chan - Frees dma resources associated with the specific channel
190 * adm_get_blksize - Get block size from burst value
203 ret = ffs(burst >> 4) - 1; in adm_get_blksize()
212 ret = -EINVAL; in adm_get_blksize()
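
The ffs(burst >> 4) - 1 expression encodes power-of-two bursts as small block-size codes: 16 -> 0, 32 -> 1, 64 -> 2, 128 -> 3; unsupported values fall through to -EINVAL. A standalone sketch of that branch (hypothetical helper covering only the power-of-two cases; upstream also special-cases larger bursts):

    #include <linux/bitops.h>
    #include <linux/log2.h>

    static int blksize_code(unsigned int burst)
    {
            if (burst < 16 || burst > 128 || !is_power_of_2(burst))
                    return -EINVAL;
            return ffs(burst >> 4) - 1;     /* 16->0, 32->1, 64->2, 128->3 */
    }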
220 * adm_process_fc_descriptors - Process descriptors for flow controlled xfers
225 * @crci: CRCI value
230 struct scatterlist *sg, u32 crci, in adm_process_fc_descriptors() argument
243 crci_cmd = ADM_CMD_SRC_CRCI(crci); in adm_process_fc_descriptors()
245 src = &achan->slave.src_addr; in adm_process_fc_descriptors()
248 crci_cmd = ADM_CMD_DST_CRCI(crci); in adm_process_fc_descriptors()
251 dst = &achan->slave.dst_addr; in adm_process_fc_descriptors()
256 box_desc->cmd = ADM_CMD_TYPE_BOX | crci_cmd; in adm_process_fc_descriptors()
257 box_desc->row_offset = row_offset; in adm_process_fc_descriptors()
258 box_desc->src_addr = *src; in adm_process_fc_descriptors()
259 box_desc->dst_addr = *dst; in adm_process_fc_descriptors()
263 box_desc->num_rows = rows << 16 | rows; in adm_process_fc_descriptors()
264 box_desc->row_len = burst << 16 | burst; in adm_process_fc_descriptors()
267 remainder -= burst * rows; in adm_process_fc_descriptors()
274 single_desc->cmd = ADM_CMD_TYPE_SINGLE | crci_cmd; in adm_process_fc_descriptors()
275 single_desc->len = remainder; in adm_process_fc_descriptors()
276 single_desc->src_addr = *src; in adm_process_fc_descriptors()
277 single_desc->dst_addr = *dst; in adm_process_fc_descriptors()
281 single_desc->cmd |= ADM_CMD_LC; in adm_process_fc_descriptors()
284 box_desc->cmd |= ADM_CMD_LC; in adm_process_fc_descriptors()
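
Putting the box fields together: each flow-controlled sg entry is carved into box descriptors of up to ADM_MAX_ROWS full-burst rows, a sub-burst tail becomes one trailing single descriptor, and the final descriptor of the transaction gets ADM_CMD_LC. A hypothetical counting helper (not in the driver) makes the split concrete:

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Descriptor demand for one FC sg entry. Example: len = 100,
     * burst = 16 gives 6 full rows in one box descriptor plus a
     * 4-byte tail in one single descriptor. */
    static void fc_desc_count(u32 len, u32 burst,
                              u32 *box_count, u32 *single_count)
    {
            *box_count = DIV_ROUND_UP(len / burst, ADM_MAX_ROWS);
            *single_count = (len % burst) ? 1 : 0;
    }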
291 * adm_process_non_fc_descriptors - Process descriptors for non-fc xfers
309 src = &achan->slave.src_addr; in adm_process_non_fc_descriptors()
313 dst = &achan->slave.dst_addr; in adm_process_non_fc_descriptors()
318 single_desc->cmd = ADM_CMD_TYPE_SINGLE; in adm_process_non_fc_descriptors()
319 single_desc->src_addr = *src; in adm_process_non_fc_descriptors()
320 single_desc->dst_addr = *dst; in adm_process_non_fc_descriptors()
321 single_desc->len = (remainder > ADM_MAX_XFER) ? in adm_process_non_fc_descriptors()
324 remainder -= single_desc->len; in adm_process_non_fc_descriptors()
325 *incr_addr += single_desc->len; in adm_process_non_fc_descriptors()
331 single_desc->cmd |= ADM_CMD_LC; in adm_process_non_fc_descriptors()
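
The non-flow-controlled path is simpler: each sg entry is chopped into single descriptors of at most ADM_MAX_XFER (64 KiB - 1) bytes, so a 150000-byte entry becomes 65535 + 65535 + 18930 bytes across three descriptors. Equivalently:

    #include <linux/kernel.h>

    /* Hypothetical helper: single descriptors needed for one non-FC
     * sg entry, e.g. non_fc_desc_count(150000) == 3. */
    static u32 non_fc_desc_count(u32 len)
    {
            return DIV_ROUND_UP(len, ADM_MAX_XFER);
    }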
337 * adm_prep_slave_sg - Prep slave sg transaction
354 struct adm_device *adev = achan->adev; in adm_prep_slave_sg()
359 u32 single_count = 0, box_count = 0, crci = 0; in adm_prep_slave_sg() local
365 dev_err(adev->dev, "invalid dma direction\n"); in adm_prep_slave_sg()
373 achan->slave.dst_maxburst : in adm_prep_slave_sg()
374 achan->slave.src_maxburst; in adm_prep_slave_sg()
376 /* if using flow control, validate burst and crci values */ in adm_prep_slave_sg()
377 if (achan->slave.device_fc) { in adm_prep_slave_sg()
380 dev_err(adev->dev, "invalid burst value: %d\n", in adm_prep_slave_sg()
385 crci = achan->crci & 0xf; in adm_prep_slave_sg()
386 if (!crci || achan->crci > 0x1f) { in adm_prep_slave_sg()
387 dev_err(adev->dev, "invalid crci value\n"); in adm_prep_slave_sg()
394 if (achan->slave.device_fc) { in adm_prep_slave_sg()
407 dev_err(adev->dev, "not enough memory for async_desc struct\n"); in adm_prep_slave_sg()
411 async_desc->mux = achan->mux ? ADM_CRCI_CTL_MUX_SEL : 0; in adm_prep_slave_sg()
412 async_desc->crci = crci; in adm_prep_slave_sg()
413 async_desc->blk_size = blk_size; in adm_prep_slave_sg()
414 async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) + in adm_prep_slave_sg()
418 async_desc->cpl = kzalloc(async_desc->dma_len, GFP_NOWAIT); in adm_prep_slave_sg()
419 if (!async_desc->cpl) { in adm_prep_slave_sg()
420 dev_err(adev->dev, "not enough memory for cpl struct\n"); in adm_prep_slave_sg()
424 async_desc->adev = adev; in adm_prep_slave_sg()
427 cple = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN); in adm_prep_slave_sg()
431 async_desc->length += sg_dma_len(sg); in adm_prep_slave_sg()
433 if (achan->slave.device_fc) in adm_prep_slave_sg()
434 desc = adm_process_fc_descriptors(achan, desc, sg, crci, in adm_prep_slave_sg()
441 async_desc->dma_addr = dma_map_single(adev->dev, async_desc->cpl, in adm_prep_slave_sg()
442 async_desc->dma_len, in adm_prep_slave_sg()
444 if (dma_mapping_error(adev->dev, async_desc->dma_addr)) { in adm_prep_slave_sg()
445 dev_err(adev->dev, "dma mapping error for cpl\n"); in adm_prep_slave_sg()
449 cple_addr = async_desc->dma_addr + ((void *)cple - async_desc->cpl); in adm_prep_slave_sg()
451 /* init cmd list */ in adm_prep_slave_sg()
452 dma_sync_single_for_cpu(adev->dev, cple_addr, sizeof(*cple), in adm_prep_slave_sg()
455 *cple |= (async_desc->dma_addr + ADM_DESC_ALIGN) >> 3; in adm_prep_slave_sg()
456 dma_sync_single_for_device(adev->dev, cple_addr, sizeof(*cple), in adm_prep_slave_sg()
459 return vchan_tx_prep(&achan->vc, &async_desc->vd, flags); in adm_prep_slave_sg()
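
From a client driver this callback is reached through the generic dmaengine slave API. A minimal, hedged usage sketch (error handling trimmed; the channel and a DMA-mapped scatterlist are assumed to exist):

    #include <linux/dmaengine.h>
    #include <linux/scatterlist.h>

    /* Sketch: queue a device-to-memory transfer on an ADM channel. */
    static int submit_rx(struct dma_chan *chan, struct scatterlist *sgl,
                         unsigned int nents)
    {
            struct dma_async_tx_descriptor *txd;
            dma_cookie_t cookie;

            txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
                                          DMA_PREP_INTERRUPT);
            if (!txd)
                    return -EIO;

            cookie = dmaengine_submit(txd);         /* lands in the vchan queue */
            if (dma_submit_error(cookie))
                    return -EIO;

            dma_async_issue_pending(chan);          /* reaches adm_issue_pending() */
            return 0;
    }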
467 * adm_terminate_all - terminate all transactions on a channel
477 struct adm_device *adev = achan->adev; in adm_terminate_all()
481 spin_lock_irqsave(&achan->vc.lock, flags); in adm_terminate_all()
482 vchan_get_all_descriptors(&achan->vc, &head); in adm_terminate_all()
486 adev->regs + ADM_CH_FLUSH_STATE0(achan->id, adev->ee)); in adm_terminate_all()
488 spin_unlock_irqrestore(&achan->vc.lock, flags); in adm_terminate_all()
490 vchan_dma_desc_free_list(&achan->vc, &head); in adm_terminate_all()
498 struct qcom_adm_peripheral_config *config = cfg->peripheral_config; in adm_slave_config()
501 spin_lock_irqsave(&achan->vc.lock, flag); in adm_slave_config()
502 memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config)); in adm_slave_config()
503 if (cfg->peripheral_size == sizeof(*config)) in adm_slave_config()
504 achan->crci = config->crci; in adm_slave_config()
505 spin_unlock_irqrestore(&achan->vc.lock, flag); in adm_slave_config()
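
The peripheral_config blob consumed here is struct qcom_adm_peripheral_config (include/linux/dma/qcom_adm.h), whose crci field supplies the CRCI number that adm_prep_slave_sg() later validates. A client-side sketch with placeholder values:

    #include <linux/dmaengine.h>
    #include <linux/dma/qcom_adm.h>

    static int config_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
    {
            struct qcom_adm_peripheral_config periph = {
                    .crci = 15,             /* placeholder CRCI number */
            };
            struct dma_slave_config cfg = {
                    .direction = DMA_DEV_TO_MEM,
                    .src_addr = fifo_addr,
                    .src_maxburst = 16,     /* must map to a valid blk size */
                    .device_fc = true,      /* use CRCI flow control */
                    .peripheral_config = &periph,
                    .peripheral_size = sizeof(periph),
            };

            return dmaengine_slave_config(chan, &cfg);
    }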
511 * adm_start_dma - start next transaction
516 struct virt_dma_desc *vd = vchan_next_desc(&achan->vc); in adm_start_dma()
517 struct adm_device *adev = achan->adev; in adm_start_dma()
520 lockdep_assert_held(&achan->vc.lock); in adm_start_dma()
525 list_del(&vd->node); in adm_start_dma()
527 /* write next command list out to the CMD FIFO */ in adm_start_dma()
529 achan->curr_txd = async_desc; in adm_start_dma()
532 achan->error = 0; in adm_start_dma()
534 if (!achan->initialized) { in adm_start_dma()
539 ADM_CH_CONF_SEC_DOMAIN(adev->ee), in adm_start_dma()
540 adev->regs + ADM_CH_CONF(achan->id)); in adm_start_dma()
543 adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee)); in adm_start_dma()
545 achan->initialized = 1; in adm_start_dma()
548 /* set the crci block size if this transaction requires CRCI */ in adm_start_dma()
549 if (async_desc->crci) { in adm_start_dma()
550 writel(async_desc->mux | async_desc->blk_size, in adm_start_dma()
551 adev->regs + ADM_CRCI_CTL(async_desc->crci, adev->ee)); in adm_start_dma()
557 /* write next command list out to the CMD FIFO */ in adm_start_dma()
558 writel(ALIGN(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3, in adm_start_dma()
559 adev->regs + ADM_CH_CMD_PTR(achan->id, adev->ee)); in adm_start_dma()
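
The >> 3 works because ADM_CH_CMD_PTR holds the command-list address in 8-byte units, which is also why the list is ALIGN()ed to ADM_DESC_ALIGN first. A worked example, assuming ADM_DESC_ALIGN is 8:

    /* dma_addr             = 0x80000004
     * ALIGN(dma_addr, 8)   = 0x80000008
     * value written        = 0x80000008 >> 3 = 0x10000001
     */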
563 * adm_dma_irq - irq handler for ADM controller
576 srcs = readl_relaxed(adev->regs + in adm_dma_irq()
577 ADM_SEC_DOMAIN_IRQ_STATUS(adev->ee)); in adm_dma_irq()
580 struct adm_chan *achan = &adev->channels[i]; in adm_dma_irq()
584 status = readl_relaxed(adev->regs + in adm_dma_irq()
585 ADM_CH_STATUS_SD(i, adev->ee)); in adm_dma_irq()
591 result = readl_relaxed(adev->regs + in adm_dma_irq()
592 ADM_CH_RSLT(i, adev->ee)); in adm_dma_irq()
600 achan->error = 1; in adm_dma_irq()
602 spin_lock_irqsave(&achan->vc.lock, flags); in adm_dma_irq()
603 async_desc = achan->curr_txd; in adm_dma_irq()
605 achan->curr_txd = NULL; in adm_dma_irq()
608 vchan_cookie_complete(&async_desc->vd); in adm_dma_irq()
614 spin_unlock_irqrestore(&achan->vc.lock, flags); in adm_dma_irq()
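
The handler fans out from one per-EE status word with a bit per channel; for each set bit it latches ADM_CH_RSLT, flags errors, and completes the vchan cookie before starting the next transaction. Roughly (handle_channel_irq() is a hypothetical helper; ADM_MAX_CHANNELS assumed to be 16):

    unsigned long srcs;
    unsigned int i;

    srcs = readl_relaxed(adev->regs + ADM_SEC_DOMAIN_IRQ_STATUS(adev->ee));
    for_each_set_bit(i, &srcs, 16 /* ADM_MAX_CHANNELS, assumed */)
            handle_channel_irq(&adev->channels[i]);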
622 * adm_tx_status - returns status of transaction
642 spin_lock_irqsave(&achan->vc.lock, flags); in adm_tx_status()
644 vd = vchan_find_desc(&achan->vc, cookie); in adm_tx_status()
646 residue = container_of(vd, struct adm_async_desc, vd)->length; in adm_tx_status()
648 spin_unlock_irqrestore(&achan->vc.lock, flags); in adm_tx_status()
657 if (achan->error) in adm_tx_status()
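
On the client side, the residue and the achan->error flag surface through dmaengine_tx_status(). A hedged sketch:

    #include <linux/dmaengine.h>

    /* Sketch: poll a cookie; DMA_ERROR corresponds to achan->error. */
    static bool xfer_done(struct dma_chan *chan, dma_cookie_t cookie)
    {
            struct dma_tx_state state;
            enum dma_status status;

            status = dmaengine_tx_status(chan, cookie, &state);
            if (status == DMA_ERROR)
                    pr_err("adm: transfer failed, residue %u\n", state.residue);
            return status == DMA_COMPLETE;
    }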
664 * adm_issue_pending - starts pending transactions
674 spin_lock_irqsave(&achan->vc.lock, flags); in adm_issue_pending()
676 if (vchan_issue_pending(&achan->vc) && !achan->curr_txd) in adm_issue_pending()
678 spin_unlock_irqrestore(&achan->vc.lock, flags); in adm_issue_pending()
682 * adm_dma_free_desc - free descriptor memory
691 dma_unmap_single(async_desc->adev->dev, async_desc->dma_addr, in adm_dma_free_desc()
692 async_desc->dma_len, DMA_TO_DEVICE); in adm_dma_free_desc()
693 kfree(async_desc->cpl); in adm_dma_free_desc()
700 achan->id = index; in adm_channel_init()
701 achan->adev = adev; in adm_channel_init()
703 vchan_init(&achan->vc, &adev->common); in adm_channel_init()
704 achan->vc.desc_free = adm_dma_free_desc; in adm_channel_init()
712 * This can use either 1-cell or 2-cell formats, the first cell
713 * identifies the channel number, while the optional second cell
714 * contains the crci value.
721 struct dma_device *dev = ofdma->of_dma_data; in adm_dma_xlate()
725 if (!dev || dma_spec->args_count > 2) in adm_dma_xlate()
728 list_for_each_entry(chan, &dev->channels, device_node) in adm_dma_xlate()
729 if (chan->chan_id == dma_spec->args[0]) { in adm_dma_xlate()
738 if (dma_spec->args_count == 2) in adm_dma_xlate()
739 achan->crci = dma_spec->args[1]; in adm_dma_xlate()
741 achan->crci = 0; in adm_dma_xlate()
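
A consumer's dmas specifier flows through this xlate: the first cell picks the channel, the optional second cell the CRCI, e.g. dmas = <&adm_dma 3 15> (values illustrative). In client code the lookup is just the usual named request, assuming a platform device pdev:

    #include <linux/dmaengine.h>

    /* Sketch: resolve the channel named "rx" in this device's dma-names. */
    struct dma_chan *chan = dma_request_chan(&pdev->dev, "rx");
    if (IS_ERR(chan))
            return PTR_ERR(chan);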
752 adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL); in adm_dma_probe()
754 return -ENOMEM; in adm_dma_probe()
756 adev->dev = &pdev->dev; in adm_dma_probe()
758 adev->regs = devm_platform_ioremap_resource(pdev, 0); in adm_dma_probe()
759 if (IS_ERR(adev->regs)) in adm_dma_probe()
760 return PTR_ERR(adev->regs); in adm_dma_probe()
762 adev->irq = platform_get_irq(pdev, 0); in adm_dma_probe()
763 if (adev->irq < 0) in adm_dma_probe()
764 return adev->irq; in adm_dma_probe()
766 ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee); in adm_dma_probe()
768 dev_err(adev->dev, "Execution environment unspecified\n"); in adm_dma_probe()
772 adev->core_clk = devm_clk_get(adev->dev, "core"); in adm_dma_probe()
773 if (IS_ERR(adev->core_clk)) in adm_dma_probe()
774 return PTR_ERR(adev->core_clk); in adm_dma_probe()
776 adev->iface_clk = devm_clk_get(adev->dev, "iface"); in adm_dma_probe()
777 if (IS_ERR(adev->iface_clk)) in adm_dma_probe()
778 return PTR_ERR(adev->iface_clk); in adm_dma_probe()
780 adev->clk_reset = devm_reset_control_get_exclusive(&pdev->dev, "clk"); in adm_dma_probe()
781 if (IS_ERR(adev->clk_reset)) { in adm_dma_probe()
782 dev_err(adev->dev, "failed to get ADM0 reset\n"); in adm_dma_probe()
783 return PTR_ERR(adev->clk_reset); in adm_dma_probe()
786 adev->c0_reset = devm_reset_control_get_exclusive(&pdev->dev, "c0"); in adm_dma_probe()
787 if (IS_ERR(adev->c0_reset)) { in adm_dma_probe()
788 dev_err(adev->dev, "failed to get ADM0 C0 reset\n"); in adm_dma_probe()
789 return PTR_ERR(adev->c0_reset); in adm_dma_probe()
792 adev->c1_reset = devm_reset_control_get_exclusive(&pdev->dev, "c1"); in adm_dma_probe()
793 if (IS_ERR(adev->c1_reset)) { in adm_dma_probe()
794 dev_err(adev->dev, "failed to get ADM0 C1 reset\n"); in adm_dma_probe()
795 return PTR_ERR(adev->c1_reset); in adm_dma_probe()
798 adev->c2_reset = devm_reset_control_get_exclusive(&pdev->dev, "c2"); in adm_dma_probe()
799 if (IS_ERR(adev->c2_reset)) { in adm_dma_probe()
800 dev_err(adev->dev, "failed to get ADM0 C2 reset\n"); in adm_dma_probe()
801 return PTR_ERR(adev->c2_reset); in adm_dma_probe()
804 ret = clk_prepare_enable(adev->core_clk); in adm_dma_probe()
806 dev_err(adev->dev, "failed to prepare/enable core clock\n"); in adm_dma_probe()
810 ret = clk_prepare_enable(adev->iface_clk); in adm_dma_probe()
812 dev_err(adev->dev, "failed to prepare/enable iface clock\n"); in adm_dma_probe()
816 reset_control_assert(adev->clk_reset); in adm_dma_probe()
817 reset_control_assert(adev->c0_reset); in adm_dma_probe()
818 reset_control_assert(adev->c1_reset); in adm_dma_probe()
819 reset_control_assert(adev->c2_reset); in adm_dma_probe()
823 reset_control_deassert(adev->clk_reset); in adm_dma_probe()
824 reset_control_deassert(adev->c0_reset); in adm_dma_probe()
825 reset_control_deassert(adev->c1_reset); in adm_dma_probe()
826 reset_control_deassert(adev->c2_reset); in adm_dma_probe()
828 adev->channels = devm_kcalloc(adev->dev, ADM_MAX_CHANNELS, in adm_dma_probe()
829 sizeof(*adev->channels), GFP_KERNEL); in adm_dma_probe()
831 if (!adev->channels) { in adm_dma_probe()
832 ret = -ENOMEM; in adm_dma_probe()
837 INIT_LIST_HEAD(&adev->common.channels); in adm_dma_probe()
840 adm_channel_init(adev, &adev->channels[i], i); in adm_dma_probe()
844 writel(ADM_CRCI_CTL_RST, adev->regs + in adm_dma_probe()
845 ADM_CRCI_CTL(i, adev->ee)); in adm_dma_probe()
849 ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(0)); in adm_dma_probe()
851 ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(1)); in adm_dma_probe()
853 ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(2)); in adm_dma_probe()
855 adev->regs + ADM_GP_CTL); in adm_dma_probe()
857 ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq, in adm_dma_probe()
864 adev->common.dev = adev->dev; in adm_dma_probe()
865 adev->common.dev->dma_parms = &adev->dma_parms; in adm_dma_probe()
868 dma_cap_zero(adev->common.cap_mask); in adm_dma_probe()
869 dma_cap_set(DMA_SLAVE, adev->common.cap_mask); in adm_dma_probe()
870 dma_cap_set(DMA_PRIVATE, adev->common.cap_mask); in adm_dma_probe()
873 adev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); in adm_dma_probe()
874 adev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; in adm_dma_probe()
875 adev->common.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); in adm_dma_probe()
876 adev->common.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); in adm_dma_probe()
877 adev->common.device_free_chan_resources = adm_free_chan; in adm_dma_probe()
878 adev->common.device_prep_slave_sg = adm_prep_slave_sg; in adm_dma_probe()
879 adev->common.device_issue_pending = adm_issue_pending; in adm_dma_probe()
880 adev->common.device_tx_status = adm_tx_status; in adm_dma_probe()
881 adev->common.device_terminate_all = adm_terminate_all; in adm_dma_probe()
882 adev->common.device_config = adm_slave_config; in adm_dma_probe()
884 ret = dma_async_device_register(&adev->common); in adm_dma_probe()
886 dev_err(adev->dev, "failed to register dma async device\n"); in adm_dma_probe()
890 ret = of_dma_controller_register(pdev->dev.of_node, adm_dma_xlate, in adm_dma_probe()
891 &adev->common); in adm_dma_probe()
898 dma_async_device_unregister(&adev->common); in adm_dma_probe()
900 clk_disable_unprepare(adev->iface_clk); in adm_dma_probe()
902 clk_disable_unprepare(adev->core_clk); in adm_dma_probe()
913 of_dma_controller_free(pdev->dev.of_node); in adm_dma_remove()
914 dma_async_device_unregister(&adev->common); in adm_dma_remove()
917 achan = &adev->channels[i]; in adm_dma_remove()
920 writel(0, adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee)); in adm_dma_remove()
922 tasklet_kill(&adev->channels[i].vc.task); in adm_dma_remove()
923 adm_terminate_all(&adev->channels[i].vc.chan); in adm_dma_remove()
926 devm_free_irq(adev->dev, adev->irq, adev); in adm_dma_remove()
928 clk_disable_unprepare(adev->core_clk); in adm_dma_remove()
929 clk_disable_unprepare(adev->iface_clk); in adm_dma_remove()
944 .name = "adm-dma-engine",