Lines Matching +full:src +full:- +full:coef

13  * Copyright (c) 2010-2014 Freescale Semiconductor, Inc.
66 #include <linux/dma-mapping.h>
85 /* Add descriptors into per chan software queue - submit_q */
94 re_chan = container_of(tx->chan, struct fsl_re_chan, chan); in fsl_re_tx_submit()
96 spin_lock_irqsave(&re_chan->desc_lock, flags); in fsl_re_tx_submit()
98 list_add_tail(&desc->node, &re_chan->submit_q); in fsl_re_tx_submit()
99 spin_unlock_irqrestore(&re_chan->desc_lock, flags); in fsl_re_tx_submit()
114 spin_lock_irqsave(&re_chan->desc_lock, flags); in fsl_re_issue_pending()
116 in_be32(&re_chan->jrregs->inbring_slot_avail)); in fsl_re_issue_pending()
118 list_for_each_entry_safe(desc, _desc, &re_chan->submit_q, node) { in fsl_re_issue_pending()
122 list_move_tail(&desc->node, &re_chan->active_q); in fsl_re_issue_pending()
124 memcpy(&re_chan->inb_ring_virt_addr[re_chan->inb_count], in fsl_re_issue_pending()
125 &desc->hwdesc, sizeof(struct fsl_re_hw_desc)); in fsl_re_issue_pending()
127 re_chan->inb_count = (re_chan->inb_count + 1) & in fsl_re_issue_pending()
129 out_be32(&re_chan->jrregs->inbring_add_job, FSL_RE_ADD_JOB(1)); in fsl_re_issue_pending()
130 avail--; in fsl_re_issue_pending()
132 spin_unlock_irqrestore(&re_chan->desc_lock, flags); in fsl_re_issue_pending()
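
A minimal user-space sketch of the job-ring index arithmetic matched above: the increment begun at line 127 is completed with FSL_RE_RING_SIZE_MASK (the same pattern is fully visible in fsl_re_dequeue() further down), i.e. the ring index wraps by masking rather than by comparison. RING_SIZE below is an assumed stand-in for FSL_RE_RING_SIZE; the real constant lives in fsl_raid.h.

#include <stdio.h>

#define RING_SIZE      64                  /* assumed power-of-two ring depth */
#define RING_SIZE_MASK (RING_SIZE - 1)

static unsigned int ring_advance(unsigned int idx)
{
        /* Wraps to 0 after RING_SIZE - 1 without a branch. */
        return (idx + 1) & RING_SIZE_MASK;
}

int main(void)
{
        unsigned int idx = RING_SIZE - 2;

        for (int i = 0; i < 4; i++) {
                printf("%u\n", idx);
                idx = ring_advance(idx);
        }
        return 0;       /* prints 62 63 0 1 */
}
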
137 dma_cookie_complete(&desc->async_tx); in fsl_re_desc_done()
138 dma_descriptor_unmap(&desc->async_tx); in fsl_re_desc_done()
139 dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); in fsl_re_desc_done()
147 spin_lock_irqsave(&re_chan->desc_lock, flags); in fsl_re_cleanup_descs()
148 list_for_each_entry_safe(desc, _desc, &re_chan->ack_q, node) { in fsl_re_cleanup_descs()
149 if (async_tx_test_ack(&desc->async_tx)) in fsl_re_cleanup_descs()
150 list_move_tail(&desc->node, &re_chan->free_q); in fsl_re_cleanup_descs()
152 spin_unlock_irqrestore(&re_chan->desc_lock, flags); in fsl_re_cleanup_descs()
154 fsl_re_issue_pending(&re_chan->chan); in fsl_re_cleanup_descs()
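
A hedged sketch of the descriptor lifecycle the list operations above imply: a software descriptor sits on submit_q after fsl_re_tx_submit(), moves to active_q when fsl_re_issue_pending() copies it into the inbound ring, to ack_q once fsl_re_dequeue() sees it completed, and finally to free_q after the client ACKs it. The types and names below are illustrative, not the driver's fsl_re_desc.

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_desc {
        struct list_head node;
};

/* Move one descriptor to the next stage of its lifecycle under the lock. */
static void demo_advance_stage(spinlock_t *lock, struct demo_desc *desc,
                               struct list_head *next_stage)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_move_tail(&desc->node, next_stage);
        spin_unlock_irqrestore(lock, flags);
}
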
168 spin_lock_irqsave(&re_chan->desc_lock, flags); in fsl_re_dequeue()
169 count = FSL_RE_SLOT_FULL(in_be32(&re_chan->jrregs->oubring_slot_full)); in fsl_re_dequeue()
170 while (count--) { in fsl_re_dequeue()
172 hwdesc = &re_chan->oub_ring_virt_addr[re_chan->oub_count]; in fsl_re_dequeue()
173 list_for_each_entry_safe(desc, _desc, &re_chan->active_q, in fsl_re_dequeue()
176 if (desc->hwdesc.lbea32 == hwdesc->lbea32 && in fsl_re_dequeue()
177 desc->hwdesc.addr_low == hwdesc->addr_low) { in fsl_re_dequeue()
185 list_move_tail(&desc->node, &re_chan->ack_q); in fsl_re_dequeue()
187 dev_err(re_chan->dev, in fsl_re_dequeue()
191 oub_count = (re_chan->oub_count + 1) & FSL_RE_RING_SIZE_MASK; in fsl_re_dequeue()
192 re_chan->oub_count = oub_count; in fsl_re_dequeue()
194 out_be32(&re_chan->jrregs->oubring_job_rmvd, in fsl_re_dequeue()
197 spin_unlock_irqrestore(&re_chan->desc_lock, flags); in fsl_re_dequeue()
208 irqstate = in_be32(&re_chan->jrregs->jr_interrupt_status); in fsl_re_isr()
218 status = in_be32(&re_chan->jrregs->jr_status); in fsl_re_isr()
219 dev_err(re_chan->dev, "chan error irqstate: %x, status: %x\n", in fsl_re_isr()
224 out_be32(&re_chan->jrregs->jr_interrupt_status, FSL_RE_CLR_INTR); in fsl_re_isr()
226 tasklet_schedule(&re_chan->irqtask); in fsl_re_isr()
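
A hedged sketch of the interrupt-handling split suggested by the fsl_re_isr() matches above: the hard IRQ handler only checks and clears the job-ring interrupt status and schedules a tasklet, and the tasklet (wired to fsl_re_dequeue() via tasklet_setup() in fsl_re_chan_probe() below) drains the outbound ring. Names and register layout here are illustrative, not the driver's; the driver itself uses the powerpc in_be32()/out_be32() accessors.

#include <linux/interrupt.h>
#include <linux/io.h>

struct demo_chan {
        struct tasklet_struct  irqtask;
        void __iomem          *irq_status;
};

static irqreturn_t demo_isr(int irq, void *data)
{
        struct demo_chan *chan = data;

        if (!ioread32(chan->irq_status))
                return IRQ_NONE;                /* not our interrupt */

        /* Acknowledge the source, then defer the ring walk to a tasklet. */
        iowrite32(~0u, chan->irq_status);
        tasklet_schedule(&chan->irqtask);

        return IRQ_HANDLED;
}
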
253 desc->re_chan = re_chan; in fsl_re_init_desc()
254 desc->async_tx.tx_submit = fsl_re_tx_submit; in fsl_re_init_desc()
255 dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan); in fsl_re_init_desc()
256 INIT_LIST_HEAD(&desc->node); in fsl_re_init_desc()
258 desc->hwdesc.fmt32 = FSL_RE_FRAME_FORMAT << FSL_RE_HWDESC_FMT_SHIFT; in fsl_re_init_desc()
259 desc->hwdesc.lbea32 = upper_32_bits(paddr); in fsl_re_init_desc()
260 desc->hwdesc.addr_low = lower_32_bits(paddr); in fsl_re_init_desc()
261 desc->cf_addr = cf; in fsl_re_init_desc()
262 desc->cf_paddr = paddr; in fsl_re_init_desc()
264 desc->cdb_addr = (void *)(cf + FSL_RE_CF_DESC_SIZE); in fsl_re_init_desc()
265 desc->cdb_paddr = paddr + FSL_RE_CF_DESC_SIZE; in fsl_re_init_desc()
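
A hedged sketch of the layout implied by lines 264-265 above: one pool allocation holds the compound frame (CF) table followed immediately by the CDB, so the CDB's virtual and DMA addresses are both derived by adding the same byte offset. CF_REGION_SIZE and the struct below are assumed stand-ins, not the driver's FSL_RE_CF_DESC_SIZE or fsl_re_desc.

#include <stdint.h>

#define CF_REGION_SIZE 256      /* assumed; the real size is in fsl_raid.h */

struct demo_desc_layout {
        void     *cf_addr;
        uint64_t  cf_paddr;
        void     *cdb_addr;
        uint64_t  cdb_paddr;
};

static void demo_carve(struct demo_desc_layout *d, void *virt, uint64_t phys)
{
        d->cf_addr   = virt;
        d->cf_paddr  = phys;
        /* The CDB lives right behind the CF table in the same allocation. */
        d->cdb_addr  = (char *)virt + CF_REGION_SIZE;
        d->cdb_paddr = phys + CF_REGION_SIZE;
}
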
280 spin_lock_irqsave(&re_chan->desc_lock, lock_flag); in fsl_re_chan_alloc_desc()
281 if (!list_empty(&re_chan->free_q)) { in fsl_re_chan_alloc_desc()
283 desc = list_first_entry(&re_chan->free_q, in fsl_re_chan_alloc_desc()
285 list_del(&desc->node); in fsl_re_chan_alloc_desc()
287 desc->async_tx.flags = flags; in fsl_re_chan_alloc_desc()
289 spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag); in fsl_re_chan_alloc_desc()
296 cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_NOWAIT, in fsl_re_chan_alloc_desc()
304 desc->async_tx.flags = flags; in fsl_re_chan_alloc_desc()
306 spin_lock_irqsave(&re_chan->desc_lock, lock_flag); in fsl_re_chan_alloc_desc()
307 re_chan->alloc_count++; in fsl_re_chan_alloc_desc()
308 spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag); in fsl_re_chan_alloc_desc()
315 struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, in fsl_re_prep_dma_genq() argument
330 dev_err(re_chan->dev, "genq tx length %zu, max length %d\n", in fsl_re_prep_dma_genq()
346 cdb |= (src_cnt - 1) << FSL_RE_CDB_NRCS_SHIFT; in fsl_re_prep_dma_genq()
350 xor = desc->cdb_addr; in fsl_re_prep_dma_genq()
351 xor->cdb32 = cdb; in fsl_re_prep_dma_genq()
356 xor->gfm[i] = scf[i]; in fsl_re_prep_dma_genq()
358 xor->gfm[i++] = 1; in fsl_re_prep_dma_genq()
362 xor->gfm[i] = 1; in fsl_re_prep_dma_genq()
366 cf = desc->cf_addr; in fsl_re_prep_dma_genq()
367 fill_cfd_frame(cf, 0, sizeof(*xor), desc->cdb_paddr, 0); in fsl_re_prep_dma_genq()
374 fill_cfd_frame(cf, i, len, src[j], 0); in fsl_re_prep_dma_genq()
380 cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT; in fsl_re_prep_dma_genq()
382 return &desc->async_tx; in fsl_re_prep_dma_genq()
390 struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, in fsl_re_prep_dma_xor() argument
393 /* NULL let genq take all coef as 1 */ in fsl_re_prep_dma_xor()
394 return fsl_re_prep_dma_genq(chan, dest, src, src_cnt, NULL, len, flags); in fsl_re_prep_dma_xor()
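
A hedged sketch of why fsl_re_prep_dma_xor() can simply forward to the GenQ routine with a NULL coefficient array (line 394 above): in GF(2^8), addition is XOR and multiplying by the coefficient 1 is the identity, so a GenQ whose coefficients are all forced to 1 (see the gfm[] fill around lines 356-362) degenerates to a plain XOR of the sources. genq_all_ones() is a user-space software model with an illustrative name, not the hardware operation or a driver function.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void genq_all_ones(uint8_t *dest, const uint8_t *const *src,
                          int src_cnt, size_t len)
{
        for (size_t i = 0; i < len; i++) {
                uint8_t acc = 0;

                /* coefficient 1 * byte == byte, so this is just XOR */
                for (int j = 0; j < src_cnt; j++)
                        acc ^= src[j][i];
                dest[i] = acc;
        }
}

int main(void)
{
        const uint8_t a[4] = { 0x01, 0x02, 0x03, 0x04 };
        const uint8_t b[4] = { 0xff, 0x0f, 0xf0, 0x00 };
        const uint8_t *srcs[2] = { a, b };
        uint8_t d[4];

        genq_all_ones(d, srcs, 2, sizeof(d));
        printf("%02x %02x %02x %02x\n", d[0], d[1], d[2], d[3]);
        return 0;       /* prints fe 0d f3 04 */
}
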
402 struct dma_chan *chan, dma_addr_t *dest, dma_addr_t *src, in fsl_re_prep_dma_pq() argument
417 dev_err(re_chan->dev, "pq tx length is %zu, max length is %d\n", in fsl_re_prep_dma_pq()
430 unsigned char coef[2]; in fsl_re_prep_dma_pq() local
432 dma_src[0] = *src; in fsl_re_prep_dma_pq()
433 coef[0] = *scf; in fsl_re_prep_dma_pq()
434 dma_src[1] = *src; in fsl_re_prep_dma_pq()
435 coef[1] = 0; in fsl_re_prep_dma_pq()
436 tx = fsl_re_prep_dma_genq(chan, dest[1], dma_src, 2, coef, len, in fsl_re_prep_dma_pq()
453 return fsl_re_prep_dma_genq(chan, dest[1], src, src_cnt, in fsl_re_prep_dma_pq()
465 cdb |= (src_cnt - 1) << FSL_RE_CDB_NRCS_SHIFT; in fsl_re_prep_dma_pq()
470 pq = desc->cdb_addr; in fsl_re_prep_dma_pq()
471 pq->cdb32 = cdb; in fsl_re_prep_dma_pq()
473 p = pq->gfm_q1; in fsl_re_prep_dma_pq()
487 cf = desc->cf_addr; in fsl_re_prep_dma_pq()
488 fill_cfd_frame(cf, 0, sizeof(struct fsl_re_pq_cdb), desc->cdb_paddr, 0); in fsl_re_prep_dma_pq()
496 fill_cfd_frame(cf, i, len, src[j], 0); in fsl_re_prep_dma_pq()
500 if (src_cnt - save_src_cnt == 3) { in fsl_re_prep_dma_pq()
508 dev_err(re_chan->dev, "PQ tx continuation error!\n"); in fsl_re_prep_dma_pq()
514 cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT; in fsl_re_prep_dma_pq()
516 return &desc->async_tx; in fsl_re_prep_dma_pq()
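
A hedged sketch of the single-source P/Q special case visible around lines 430-436 above: the job is padded to two source slots, but the duplicated slot carries coefficient 0, and 0 * src contributes nothing to the Q syndrome in GF(2^8), so the result is unchanged. struct demo_pq and demo_pad_single_source() are illustrative names, not driver types.

#include <stdint.h>

struct demo_pq {
        uint64_t src[2];        /* stand-ins for the dma_addr_t slots */
        uint8_t  coef[2];
};

static void demo_pad_single_source(struct demo_pq *job,
                                   uint64_t src, uint8_t scf)
{
        job->src[0]  = src;
        job->coef[0] = scf;     /* the real coefficient */
        job->src[1]  = src;     /* same buffer a second time ... */
        job->coef[1] = 0;       /* ... multiplied by zero, i.e. ignored */
}
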
525 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, in fsl_re_prep_dma_memcpy() argument
538 dev_err(re_chan->dev, "cp tx length is %zu, max length is %d\n", in fsl_re_prep_dma_memcpy()
553 move = desc->cdb_addr; in fsl_re_prep_dma_memcpy()
554 move->cdb32 = cdb; in fsl_re_prep_dma_memcpy()
557 cf = desc->cf_addr; in fsl_re_prep_dma_memcpy()
558 fill_cfd_frame(cf, 0, sizeof(*move), desc->cdb_paddr, 0); in fsl_re_prep_dma_memcpy()
565 /* Fill CFD's 2nd frame with src buffer */ in fsl_re_prep_dma_memcpy()
566 fill_cfd_frame(cf, 2, length, src, 1); in fsl_re_prep_dma_memcpy()
568 return &desc->async_tx; in fsl_re_prep_dma_memcpy()
585 cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_KERNEL, in fsl_re_alloc_chan_resources()
592 INIT_LIST_HEAD(&desc->node); in fsl_re_alloc_chan_resources()
595 list_add_tail(&desc->node, &re_chan->free_q); in fsl_re_alloc_chan_resources()
596 re_chan->alloc_count++; in fsl_re_alloc_chan_resources()
598 return re_chan->alloc_count; in fsl_re_alloc_chan_resources()
607 while (re_chan->alloc_count--) { in fsl_re_free_chan_resources()
608 desc = list_first_entry(&re_chan->free_q, in fsl_re_free_chan_resources()
612 list_del(&desc->node); in fsl_re_free_chan_resources()
613 dma_pool_free(re_chan->re_dev->cf_desc_pool, desc->cf_addr, in fsl_re_free_chan_resources()
614 desc->cf_paddr); in fsl_re_free_chan_resources()
618 if (!list_empty(&re_chan->free_q)) in fsl_re_free_chan_resources()
619 dev_err(re_chan->dev, "chan resource cannot be cleaned!\n"); in fsl_re_free_chan_resources()
634 dev = &ofdev->dev; in fsl_re_chan_probe()
636 dma_dev = &re_priv->dma_dev; in fsl_re_chan_probe()
640 return -ENOMEM; in fsl_re_chan_probe()
646 ret = -EINVAL; in fsl_re_chan_probe()
654 ret = -ENODEV; in fsl_re_chan_probe()
658 chan->jrregs = (struct fsl_re_chan_cfg *)((u8 *)re_priv->re_regs + in fsl_re_chan_probe()
662 chan->irq = irq_of_parse_and_map(np, 0); in fsl_re_chan_probe()
663 if (!chan->irq) { in fsl_re_chan_probe()
665 ret = -ENODEV; in fsl_re_chan_probe()
669 snprintf(chan->name, sizeof(chan->name), "re_jr%02d", q); in fsl_re_chan_probe()
671 chandev = &chan_ofdev->dev; in fsl_re_chan_probe()
672 tasklet_setup(&chan->irqtask, fsl_re_dequeue); in fsl_re_chan_probe()
674 ret = request_irq(chan->irq, fsl_re_isr, 0, chan->name, chandev); in fsl_re_chan_probe()
677 ret = -EINVAL; in fsl_re_chan_probe()
681 re_priv->re_jrs[q] = chan; in fsl_re_chan_probe()
682 chan->chan.device = dma_dev; in fsl_re_chan_probe()
683 chan->chan.private = chan; in fsl_re_chan_probe()
684 chan->dev = chandev; in fsl_re_chan_probe()
685 chan->re_dev = re_priv; in fsl_re_chan_probe()
687 spin_lock_init(&chan->desc_lock); in fsl_re_chan_probe()
688 INIT_LIST_HEAD(&chan->ack_q); in fsl_re_chan_probe()
689 INIT_LIST_HEAD(&chan->active_q); in fsl_re_chan_probe()
690 INIT_LIST_HEAD(&chan->submit_q); in fsl_re_chan_probe()
691 INIT_LIST_HEAD(&chan->free_q); in fsl_re_chan_probe()
693 chan->inb_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool, in fsl_re_chan_probe()
694 GFP_KERNEL, &chan->inb_phys_addr); in fsl_re_chan_probe()
695 if (!chan->inb_ring_virt_addr) { in fsl_re_chan_probe()
697 ret = -ENOMEM; in fsl_re_chan_probe()
701 chan->oub_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool, in fsl_re_chan_probe()
702 GFP_KERNEL, &chan->oub_phys_addr); in fsl_re_chan_probe()
703 if (!chan->oub_ring_virt_addr) { in fsl_re_chan_probe()
705 ret = -ENOMEM; in fsl_re_chan_probe()
710 out_be32(&chan->jrregs->inbring_base_h, in fsl_re_chan_probe()
711 chan->inb_phys_addr & FSL_RE_ADDR_BIT_MASK); in fsl_re_chan_probe()
712 out_be32(&chan->jrregs->oubring_base_h, in fsl_re_chan_probe()
713 chan->oub_phys_addr & FSL_RE_ADDR_BIT_MASK); in fsl_re_chan_probe()
714 out_be32(&chan->jrregs->inbring_base_l, in fsl_re_chan_probe()
715 chan->inb_phys_addr >> FSL_RE_ADDR_BIT_SHIFT); in fsl_re_chan_probe()
716 out_be32(&chan->jrregs->oubring_base_l, in fsl_re_chan_probe()
717 chan->oub_phys_addr >> FSL_RE_ADDR_BIT_SHIFT); in fsl_re_chan_probe()
718 out_be32(&chan->jrregs->inbring_size, in fsl_re_chan_probe()
720 out_be32(&chan->jrregs->oubring_size, in fsl_re_chan_probe()
723 /* Read LIODN value from u-boot */ in fsl_re_chan_probe()
724 status = in_be32(&chan->jrregs->jr_config_1) & FSL_RE_REG_LIODN_MASK; in fsl_re_chan_probe()
727 out_be32(&chan->jrregs->jr_config_1, in fsl_re_chan_probe()
733 out_be32(&chan->jrregs->jr_command, FSL_RE_ENABLE); in fsl_re_chan_probe()
738 dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr, in fsl_re_chan_probe()
739 chan->inb_phys_addr); in fsl_re_chan_probe()
755 struct device *dev = &ofdev->dev; in fsl_re_probe()
759 return -ENOMEM; in fsl_re_probe()
763 return -ENODEV; in fsl_re_probe()
766 re_priv->re_regs = devm_ioremap(dev, res->start, resource_size(res)); in fsl_re_probe()
767 if (!re_priv->re_regs) in fsl_re_probe()
768 return -EBUSY; in fsl_re_probe()
771 out_be32(&re_priv->re_regs->global_config, FSL_RE_NON_DPAA_MODE); in fsl_re_probe()
774 out_be32(&re_priv->re_regs->galois_field_config, FSL_RE_GFM_POLY); in fsl_re_probe()
777 in_be32(&re_priv->re_regs->re_version_id), in fsl_re_probe()
778 in_be32(&re_priv->re_regs->global_config), in fsl_re_probe()
779 in_be32(&re_priv->re_regs->galois_field_config)); in fsl_re_probe()
781 dma_dev = &re_priv->dma_dev; in fsl_re_probe()
782 dma_dev->dev = dev; in fsl_re_probe()
783 INIT_LIST_HEAD(&dma_dev->channels); in fsl_re_probe()
786 dma_dev->device_alloc_chan_resources = fsl_re_alloc_chan_resources; in fsl_re_probe()
787 dma_dev->device_tx_status = fsl_re_tx_status; in fsl_re_probe()
788 dma_dev->device_issue_pending = fsl_re_issue_pending; in fsl_re_probe()
790 dma_dev->max_xor = FSL_RE_MAX_XOR_SRCS; in fsl_re_probe()
791 dma_dev->device_prep_dma_xor = fsl_re_prep_dma_xor; in fsl_re_probe()
792 dma_cap_set(DMA_XOR, dma_dev->cap_mask); in fsl_re_probe()
794 dma_dev->max_pq = FSL_RE_MAX_PQ_SRCS; in fsl_re_probe()
795 dma_dev->device_prep_dma_pq = fsl_re_prep_dma_pq; in fsl_re_probe()
796 dma_cap_set(DMA_PQ, dma_dev->cap_mask); in fsl_re_probe()
798 dma_dev->device_prep_dma_memcpy = fsl_re_prep_dma_memcpy; in fsl_re_probe()
799 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); in fsl_re_probe()
801 dma_dev->device_free_chan_resources = fsl_re_free_chan_resources; in fsl_re_probe()
803 re_priv->total_chans = 0; in fsl_re_probe()
805 re_priv->cf_desc_pool = dmam_pool_create("fsl_re_cf_desc_pool", dev, in fsl_re_probe()
809 if (!re_priv->cf_desc_pool) { in fsl_re_probe()
811 return -ENOMEM; in fsl_re_probe()
814 re_priv->hw_desc_pool = dmam_pool_create("fsl_re_hw_desc_pool", dev, in fsl_re_probe()
817 if (!re_priv->hw_desc_pool) { in fsl_re_probe()
819 return -ENOMEM; in fsl_re_probe()
825 for_each_compatible_node(np, NULL, "fsl,raideng-v1.0-job-queue") { in fsl_re_probe()
830 return -ENODEV; in fsl_re_probe()
835 "fsl,raideng-v1.0-job-ring"); in fsl_re_probe()
838 re_priv->total_chans++; in fsl_re_probe()
850 tasklet_kill(&chan->irqtask); in fsl_re_remove_chan()
852 dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr, in fsl_re_remove_chan()
853 chan->inb_phys_addr); in fsl_re_remove_chan()
855 dma_pool_free(chan->re_dev->hw_desc_pool, chan->oub_ring_virt_addr, in fsl_re_remove_chan()
856 chan->oub_phys_addr); in fsl_re_remove_chan()
865 dev = &ofdev->dev; in fsl_re_remove()
869 for (i = 0; i < re_priv->total_chans; i++) in fsl_re_remove()
870 fsl_re_remove_chan(re_priv->re_jrs[i]); in fsl_re_remove()
873 dma_async_device_unregister(&re_priv->dma_dev); in fsl_re_remove()
879 { .compatible = "fsl,raideng-v1.0", },
886 .name = "fsl-raideng",
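
For context, a hedged sketch of how a generic dmaengine client could end up exercising the DMA_MEMCPY capability that fsl_re_probe() registers above. This is ordinary dmaengine API usage, not code from this driver; dst and src are assumed to be already DMA-mapped, demo_hw_memcpy() is an illustrative name, and error handling is minimal.

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int demo_hw_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
        dma_cap_mask_t mask;
        struct dma_chan *chan;
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;
        int ret = 0;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        /* Any MEMCPY-capable channel; may or may not be a RAID engine one. */
        chan = dma_request_channel(mask, NULL, NULL);
        if (!chan)
                return -ENODEV;

        tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
        if (!tx) {
                ret = -EIO;
                goto out;
        }

        cookie = dmaengine_submit(tx);  /* ends up in the channel's tx_submit */
        dma_async_issue_pending(chan);  /* e.g. fsl_re_issue_pending() above */

        if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
                ret = -EIO;
out:
        dma_release_channel(chan);
        return ret;
}
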