Lines Matching +full:dma +full:- +full:pool
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
12 #include <linux/dma-mapping.h>
42 * are to be re-defined
55 (kdev->instances + (idx << kdev->inst_shift))
58 list_for_each_entry_rcu(qh, &inst->handles, list, \
62 for (idx = 0, inst = kdev->instances; \
63 idx < (kdev)->num_queues_in_use; \
82 * @inst: - qmss queue instance (e.g., an accumulator queue)
93 if (atomic_read(&qh->notifier_enabled) <= 0) in knav_queue_notify()
95 if (WARN_ON(!qh->notifier_fn)) in knav_queue_notify()
97 this_cpu_inc(qh->stats->notifies); in knav_queue_notify()
98 qh->notifier_fn(qh->notifier_fn_arg); in knav_queue_notify()
115 unsigned queue = inst->id - range->queue_base; in knav_queue_setup_irq()
118 if (range->flags & RANGE_HAS_IRQ) { in knav_queue_setup_irq()
119 irq = range->irqs[queue].irq; in knav_queue_setup_irq()
121 inst->irq_name, inst); in knav_queue_setup_irq()
125 if (range->irqs[queue].cpu_mask) { in knav_queue_setup_irq()
126 ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask); in knav_queue_setup_irq()
128 dev_warn(range->kdev->dev, in knav_queue_setup_irq()
139 struct knav_range_info *range = inst->range; in knav_queue_free_irq()
140 unsigned queue = inst->id - inst->range->queue_base; in knav_queue_free_irq()
143 if (range->flags & RANGE_HAS_IRQ) { in knav_queue_free_irq()
144 irq = range->irqs[queue].irq; in knav_queue_free_irq()
152 return !list_empty(&inst->handles); in knav_queue_is_busy()
157 return inst->range->flags & RANGE_RESERVED; in knav_queue_is_reserved()
166 if (tmp->flags & KNAV_QUEUE_SHARED) { in knav_queue_is_shared()
179 (inst->range->flags & RANGE_HAS_IRQ)) { in knav_queue_match_type()
182 (inst->range->flags & RANGE_HAS_ACCUMULATOR)) { in knav_queue_match_type()
185 !(inst->range->flags & in knav_queue_match_type()
199 if (inst->id == id) in knav_queue_match_id_to_inst()
207 if (kdev->base_id <= id && in knav_queue_find_by_id()
208 kdev->base_id + kdev->num_queues > id) { in knav_queue_find_by_id()
209 id -= kdev->base_id; in knav_queue_find_by_id()
222 qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL); in __knav_queue_open()
224 return ERR_PTR(-ENOMEM); in __knav_queue_open()
226 qh->stats = alloc_percpu(struct knav_queue_stats); in __knav_queue_open()
227 if (!qh->stats) { in __knav_queue_open()
228 ret = -ENOMEM; in __knav_queue_open()
232 qh->flags = flags; in __knav_queue_open()
233 qh->inst = inst; in __knav_queue_open()
234 id = inst->id - inst->qmgr->start_queue; in __knav_queue_open()
235 qh->reg_push = &inst->qmgr->reg_push[id]; in __knav_queue_open()
236 qh->reg_pop = &inst->qmgr->reg_pop[id]; in __knav_queue_open()
237 qh->reg_peek = &inst->qmgr->reg_peek[id]; in __knav_queue_open()
241 struct knav_range_info *range = inst->range; in __knav_queue_open()
243 inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL); in __knav_queue_open()
244 if (range->ops && range->ops->open_queue) in __knav_queue_open()
245 ret = range->ops->open_queue(range, inst, flags); in __knav_queue_open()
250 list_add_tail_rcu(&qh->list, &inst->handles); in __knav_queue_open()
254 if (qh->stats) in __knav_queue_open()
255 free_percpu(qh->stats); in __knav_queue_open()
256 devm_kfree(inst->kdev->dev, qh); in __knav_queue_open()
268 qh = ERR_PTR(-ENODEV); in knav_queue_open_by_id()
273 qh = ERR_PTR(-EEXIST); in knav_queue_open_by_id()
277 qh = ERR_PTR(-EBUSY); in knav_queue_open_by_id()
294 struct knav_queue *qh = ERR_PTR(-EINVAL); in knav_queue_open_by_type()
317 struct knav_range_info *range = inst->range; in knav_queue_set_notify()
319 if (range->ops && range->ops->set_notify) in knav_queue_set_notify()
320 range->ops->set_notify(range, inst, enabled); in knav_queue_set_notify()
325 struct knav_queue_inst *inst = qh->inst; in knav_queue_enable_notifier()
328 if (WARN_ON(!qh->notifier_fn)) in knav_queue_enable_notifier()
329 return -EINVAL; in knav_queue_enable_notifier()
332 first = (atomic_inc_return(&qh->notifier_enabled) == 1); in knav_queue_enable_notifier()
337 first = (atomic_inc_return(&inst->num_notifiers) == 1); in knav_queue_enable_notifier()
346 struct knav_queue_inst *inst = qh->inst; in knav_queue_disable_notifier()
349 last = (atomic_dec_return(&qh->notifier_enabled) == 0); in knav_queue_disable_notifier()
353 last = (atomic_dec_return(&inst->num_notifiers) == 0); in knav_queue_disable_notifier()
363 knav_queue_notify_fn old_fn = qh->notifier_fn; in knav_queue_set_notifier()
366 return -EINVAL; in knav_queue_set_notifier()
368 if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) in knav_queue_set_notifier()
369 return -ENOTSUPP; in knav_queue_set_notifier()
371 if (!cfg->fn && old_fn) in knav_queue_set_notifier()
374 qh->notifier_fn = cfg->fn; in knav_queue_set_notifier()
375 qh->notifier_fn_arg = cfg->fn_arg; in knav_queue_set_notifier()
377 if (cfg->fn && !old_fn) in knav_queue_set_notifier()
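/* Usage sketch (editorial addition, not driver code): attach a notifier
 * through the exported control interface; knav_queue_set_notifier() itself
 * is static. my_work_fn() and my_ctx are hypothetical names; the knav_*
 * symbols come from <linux/soc/ti/knav_qmss.h>. Per the code above, setting
 * a non-NULL fn also enables notification.
 */
#include <linux/err.h>
#include <linux/soc/ti/knav_qmss.h>

static void my_work_fn(void *arg)	/* invoked via knav_queue_notify() */
{
	/* pop and process descriptors here */
}

static int example_attach_notifier(void *qh, void *my_ctx)
{
	struct knav_queue_notify_config cfg = {
		.fn	= my_work_fn,
		.fn_arg	= my_ctx,
	};

	/* returns -ENOTSUPP unless the queue range has an IRQ or accumulator */
	return knav_queue_device_control(qh, KNAV_QUEUE_SET_NOTIFIER,
					 (unsigned long)&cfg);
}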
389 if (range->flags & RANGE_HAS_IRQ) { in knav_gp_set_notify()
390 queue = inst->id - range->queue_base; in knav_gp_set_notify()
392 enable_irq(range->irqs[queue].irq); in knav_gp_set_notify()
394 disable_irq_nosync(range->irqs[queue].irq); in knav_gp_set_notify()
422 struct knav_queue_inst *inst = qh->inst; in knav_queue_get_count()
424 return readl_relaxed(&qh->reg_peek[0].entry_count) + in knav_queue_get_count()
425 atomic_read(&inst->desc_count); in knav_queue_get_count()
431 struct knav_device *kdev = inst->kdev; in knav_queue_debug_show_instance()
444 kdev->base_id + inst->id, inst->name); in knav_queue_debug_show_instance()
447 pushes += per_cpu_ptr(qh->stats, cpu)->pushes; in knav_queue_debug_show_instance()
448 pops += per_cpu_ptr(qh->stats, cpu)->pops; in knav_queue_debug_show_instance()
449 push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors; in knav_queue_debug_show_instance()
450 pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors; in knav_queue_debug_show_instance()
451 notifies += per_cpu_ptr(qh->stats, cpu)->notifies; in knav_queue_debug_show_instance()
471 seq_printf(s, "%s: %u-%u\n", in knav_queue_debug_show()
472 dev_name(kdev->dev), kdev->base_id, in knav_queue_debug_show()
473 kdev->base_id + kdev->num_queues - 1); in knav_queue_debug_show()
498 return val ? -ETIMEDOUT : 0; in knav_queue_pdsp_wait()
504 struct knav_queue_inst *inst = qh->inst; in knav_queue_flush()
505 unsigned id = inst->id - inst->qmgr->start_queue; in knav_queue_flush()
507 atomic_set(&inst->desc_count, 0); in knav_queue_flush()
508 writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh); in knav_queue_flush()
513 * knav_queue_open() - open a hardware queue
514 * @name: - name to give the queue handle
515 * @id: - desired queue number if any, or specifies the type of queue
517 * @flags: - the following flags are applicable to queues:
518 * KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are not shared by default.
529 struct knav_queue *qh = ERR_PTR(-EINVAL); in knav_queue_open()
547 * knav_queue_close() - close a hardware queue handle
548 * @qhandle: - handle to close
553 struct knav_queue_inst *inst = qh->inst; in knav_queue_close()
555 while (atomic_read(&qh->notifier_enabled) > 0) in knav_queue_close()
559 list_del_rcu(&qh->list); in knav_queue_close()
563 struct knav_range_info *range = inst->range; in knav_queue_close()
565 if (range->ops && range->ops->close_queue) in knav_queue_close()
566 range->ops->close_queue(range, inst); in knav_queue_close()
568 free_percpu(qh->stats); in knav_queue_close()
569 devm_kfree(inst->kdev->dev, qh); in knav_queue_close()
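/* Usage sketch (editorial): open a shared general-purpose queue by type and
 * close it again. The client name "my-gp-queue" is illustrative; the knav_*
 * calls and flags are from <linux/soc/ti/knav_qmss.h>.
 */
static int example_open_close(void)
{
	void *qh;

	qh = knav_queue_open("my-gp-queue", KNAV_QUEUE_GP, KNAV_QUEUE_SHARED);
	if (IS_ERR(qh))
		return PTR_ERR(qh);	/* e.g. -EPROBE_DEFER or -ENODEV */

	/* ... push/pop descriptors ... */

	knav_queue_close(qh);
	return 0;
}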
574 * knav_queue_device_control() - Perform control operations on a queue
575 * @qhandle: - queue handle
576 * @cmd: - control commands
577 * @arg: - command argument
590 ret = qh->inst->kdev->base_id + qh->inst->id; in knav_queue_device_control()
615 ret = -ENOTSUPP; in knav_queue_device_control()
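/* Usage sketch (editorial): the same control call answers simple queries and
 * flushes a queue; the commands are from enum knav_queue_ctrl_cmd in
 * <linux/soc/ti/knav_qmss.h>.
 */
static void example_query_and_flush(void *qh)
{
	int id    = knav_queue_device_control(qh, KNAV_QUEUE_GET_ID, 0);
	int count = knav_queue_device_control(qh, KNAV_QUEUE_GET_COUNT, 0);

	pr_info("queue %d holds %d descriptors\n", id, count);
	knav_queue_device_control(qh, KNAV_QUEUE_FLUSH, 0);
}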
625 * knav_queue_push() - push data (or descriptor) to the tail of a queue
626 * @qhandle: - hardware queue handle
627 * @dma: - DMA address of the data to push
628 * @size: - size of data to push
629 * @flags: - can be used to pass additional information
633 int knav_queue_push(void *qhandle, dma_addr_t dma, in knav_queue_push() argument
639 val = (u32)dma | ((size / 16) - 1); in knav_queue_push()
640 writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh); in knav_queue_push()
642 this_cpu_inc(qh->stats->pushes); in knav_queue_push()
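/* Usage sketch (editorial): push one descriptor. Since the low bits of the
 * push word encode (size / 16) - 1, as seen above, the DMA address must be
 * at least 16-byte aligned; dma/size are assumed to come from
 * knav_pool_desc_map() or an equivalent mapping.
 */
static int example_push(void *qh, dma_addr_t dma, unsigned int size)
{
	return knav_queue_push(qh, dma, size, 0);
}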
648 * knav_queue_pop() - pop data (or descriptor) from the head of a queue
649 * @qhandle: - hardware queue handle
650 * @size: - (optional) size of the data popped.
652 * Returns a DMA address on success, 0 on failure.
657 struct knav_queue_inst *inst = qh->inst; in knav_queue_pop()
658 dma_addr_t dma; in knav_queue_pop() local
662 if (inst->descs) { in knav_queue_pop()
663 if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) { in knav_queue_pop()
664 atomic_inc(&inst->desc_count); in knav_queue_pop()
667 idx = atomic_inc_return(&inst->desc_head); in knav_queue_pop()
669 val = inst->descs[idx]; in knav_queue_pop()
671 val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh); in knav_queue_pop()
676 dma = val & DESC_PTR_MASK; in knav_queue_pop()
680 this_cpu_inc(qh->stats->pops); in knav_queue_pop()
681 return dma; in knav_queue_pop()
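/* Usage sketch (editorial): drain a queue. knav_queue_pop() returns 0 when
 * the queue is empty and reports the descriptor size through *size.
 */
static void example_drain(void *qh)
{
	unsigned int size;
	dma_addr_t dma;

	while ((dma = knav_queue_pop(qh, &size)))
		pr_debug("popped desc %pad, %u bytes\n", &dma, size);
}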
686 static void kdesc_fill_pool(struct knav_pool *pool) in kdesc_fill_pool() argument
691 region = pool->region; in kdesc_fill_pool()
692 pool->desc_size = region->desc_size; in kdesc_fill_pool()
693 for (i = 0; i < pool->num_desc; i++) { in kdesc_fill_pool()
694 int index = pool->region_offset + i; in kdesc_fill_pool()
697 dma_addr = region->dma_start + (region->desc_size * index); in kdesc_fill_pool()
698 dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES); in kdesc_fill_pool()
699 dma_sync_single_for_device(pool->dev, dma_addr, dma_size, in kdesc_fill_pool()
701 knav_queue_push(pool->queue, dma_addr, dma_size, 0); in kdesc_fill_pool()
706 static void kdesc_empty_pool(struct knav_pool *pool) in kdesc_empty_pool() argument
708 dma_addr_t dma; in kdesc_empty_pool() local
713 if (!pool->queue) in kdesc_empty_pool()
717 dma = knav_queue_pop(pool->queue, &size); in kdesc_empty_pool()
718 if (!dma) in kdesc_empty_pool()
720 desc = knav_pool_desc_dma_to_virt(pool, dma); in kdesc_empty_pool()
722 dev_dbg(pool->kdev->dev, in kdesc_empty_pool()
727 WARN_ON(i != pool->num_desc); in kdesc_empty_pool()
728 knav_queue_close(pool->queue); in kdesc_empty_pool()
732 /* Get the DMA address of a descriptor */
735 struct knav_pool *pool = ph; in knav_pool_desc_virt_to_dma() local
736 return pool->region->dma_start + (virt - pool->region->virt_start); in knav_pool_desc_virt_to_dma()
740 void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma) in knav_pool_desc_dma_to_virt() argument
742 struct knav_pool *pool = ph; in knav_pool_desc_dma_to_virt() local
743 return pool->region->virt_start + (dma - pool->region->dma_start); in knav_pool_desc_dma_to_virt()
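/* Editorial note: both helpers above are plain offset arithmetic within one
 * physically contiguous region, so they are exact inverses of each other:
 *
 *	dma  = dma_start  + (virt - virt_start)
 *	virt = virt_start + (dma  - dma_start)
 *
 * e.g. (illustrative values) with virt_start == 0xc0000000 and
 * dma_start == 0x80000000, virt 0xc0001000 <-> dma 0x80001000.
 */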
748 * knav_pool_create() - Create a pool of descriptors
749 * @name: - name to give the pool handle
750 * @num_desc: - number of descriptors in the pool
751 * @region_id: - QMSS region id from which the descriptors are to be allocated
754 * Returns a pool handle on success.
761 struct knav_pool *pool, *pi = NULL, *iter; in knav_pool_create() local
767 return ERR_PTR(-EPROBE_DEFER); in knav_pool_create()
769 if (!kdev->dev) in knav_pool_create()
770 return ERR_PTR(-ENODEV); in knav_pool_create()
772 pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL); in knav_pool_create()
773 if (!pool) { in knav_pool_create()
774 dev_err(kdev->dev, "out of memory allocating pool\n"); in knav_pool_create()
775 return ERR_PTR(-ENOMEM); in knav_pool_create()
779 if (reg_itr->id != region_id) in knav_pool_create()
786 dev_err(kdev->dev, "region-id(%d) not found\n", region_id); in knav_pool_create()
787 ret = -EINVAL; in knav_pool_create()
791 pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0); in knav_pool_create()
792 if (IS_ERR(pool->queue)) { in knav_pool_create()
793 dev_err(kdev->dev, in knav_pool_create()
794 "failed to open queue for pool(%s), error %ld\n", in knav_pool_create()
795 name, PTR_ERR(pool->queue)); in knav_pool_create()
796 ret = PTR_ERR(pool->queue); in knav_pool_create()
800 pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL); in knav_pool_create()
801 pool->kdev = kdev; in knav_pool_create()
802 pool->dev = kdev->dev; in knav_pool_create()
806 if (num_desc > (region->num_desc - region->used_desc)) { in knav_pool_create()
807 dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n", in knav_pool_create()
809 ret = -ENOMEM; in knav_pool_create()
818 node = &region->pools; in knav_pool_create()
819 list_for_each_entry(iter, &region->pools, region_inst) { in knav_pool_create()
820 if ((iter->region_offset - last_offset) >= num_desc) { in knav_pool_create()
824 last_offset = iter->region_offset + iter->num_desc; in knav_pool_create()
828 node = &pi->region_inst; in knav_pool_create()
829 pool->region = region; in knav_pool_create()
830 pool->num_desc = num_desc; in knav_pool_create()
831 pool->region_offset = last_offset; in knav_pool_create()
832 region->used_desc += num_desc; in knav_pool_create()
833 list_add_tail(&pool->list, &kdev->pools); in knav_pool_create()
834 list_add_tail(&pool->region_inst, node); in knav_pool_create()
836 dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n", in knav_pool_create()
838 ret = -ENOMEM; in knav_pool_create()
843 kdesc_fill_pool(pool); in knav_pool_create()
844 return pool; in knav_pool_create()
849 kfree(pool->name); in knav_pool_create()
850 devm_kfree(kdev->dev, pool); in knav_pool_create()
856 * knav_pool_destroy() - Free a pool of descriptors
857 * @ph: - pool handle
861 struct knav_pool *pool = ph; in knav_pool_destroy() local
863 if (!pool) in knav_pool_destroy()
866 if (!pool->region) in knav_pool_destroy()
869 kdesc_empty_pool(pool); in knav_pool_destroy()
872 pool->region->used_desc -= pool->num_desc; in knav_pool_destroy()
873 list_del(&pool->region_inst); in knav_pool_destroy()
874 list_del(&pool->list); in knav_pool_destroy()
877 kfree(pool->name); in knav_pool_destroy()
878 devm_kfree(kdev->dev, pool); in knav_pool_destroy()
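/* Usage sketch (editorial): carve a pool of 64 descriptors out of QMSS
 * region 12 and tear it down again. The pool name and region id are
 * illustrative and must match a region declared in the device tree.
 */
static int example_pool_lifetime(void)
{
	void *pool;

	pool = knav_pool_create("my-desc-pool", 64, 12);
	if (IS_ERR(pool))
		return PTR_ERR(pool);	/* e.g. -EPROBE_DEFER before probe */

	/* ... allocate and use descriptors ... */

	knav_pool_destroy(pool);
	return 0;
}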
884 * knav_pool_desc_get() - Get a descriptor from the pool
885 * @ph: - pool handle
887 * Returns a descriptor from the pool, or ERR_PTR(-ENOMEM) if the pool is empty.
891 struct knav_pool *pool = ph; in knav_pool_desc_get() local
892 dma_addr_t dma; in knav_pool_desc_get() local
896 dma = knav_queue_pop(pool->queue, &size); in knav_pool_desc_get()
897 if (unlikely(!dma)) in knav_pool_desc_get()
898 return ERR_PTR(-ENOMEM); in knav_pool_desc_get()
899 data = knav_pool_desc_dma_to_virt(pool, dma); in knav_pool_desc_get()
905 * knav_pool_desc_put() - return a descriptor to the pool
906 * @ph: - pool handle
907 * @desc: - virtual address
911 struct knav_pool *pool = ph; in knav_pool_desc_put() local
912 dma_addr_t dma; in knav_pool_desc_put() local
913 dma = knav_pool_desc_virt_to_dma(pool, desc); in knav_pool_desc_put()
914 knav_queue_push(pool->queue, dma, pool->region->desc_size, 0); in knav_pool_desc_put()
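/* Usage sketch (editorial): borrow a descriptor and hand it back.
 * knav_pool_desc_get() returns ERR_PTR(-ENOMEM) when the pool is empty.
 */
static void example_desc_cycle(void *pool)
{
	void *desc = knav_pool_desc_get(pool);

	if (IS_ERR(desc))
		return;
	/* ... fill in the descriptor ... */
	knav_pool_desc_put(pool, desc);
}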
919 * knav_pool_desc_map() - Map descriptor for DMA transfer
920 * @ph: - pool handle
921 * @desc: - address of descriptor to map
922 * @size: - size of descriptor to map
923 * @dma: - DMA address return pointer
924 * @dma_sz: - return pointer for the adjusted (cache-aligned) DMA size
929 dma_addr_t *dma, unsigned *dma_sz) in knav_pool_desc_map() argument
931 struct knav_pool *pool = ph; in knav_pool_desc_map() local
932 *dma = knav_pool_desc_virt_to_dma(pool, desc); in knav_pool_desc_map()
933 size = min(size, pool->region->desc_size); in knav_pool_desc_map()
936 dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE); in knav_pool_desc_map()
946 * knav_pool_desc_unmap() - Unmap descriptor after DMA transfer
947 * @ph: - pool handle
948 * @dma: - DMA address of descriptor to unmap
949 * @dma_sz: - size of descriptor to unmap
954 void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz) in knav_pool_desc_unmap() argument
956 struct knav_pool *pool = ph; in knav_pool_desc_unmap() local
960 desc_sz = min(dma_sz, pool->region->desc_size); in knav_pool_desc_unmap()
961 desc = knav_pool_desc_dma_to_virt(pool, dma); in knav_pool_desc_unmap()
962 dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE); in knav_pool_desc_unmap()
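/* Usage sketch (editorial): the usual round trip for handing a descriptor
 * to hardware and reclaiming it; 64 is an illustrative payload size and is
 * clamped/aligned by the helpers above.
 */
static int example_desc_roundtrip(void *pool, void *qh)
{
	unsigned int dma_sz;
	dma_addr_t dma;
	void *desc;
	int ret;

	desc = knav_pool_desc_get(pool);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* ... write the descriptor contents ... */
	ret = knav_pool_desc_map(pool, desc, 64, &dma, &dma_sz);
	if (ret)
		return ret;
	knav_queue_push(qh, dma, dma_sz, 0);

	/* later, typically from a completion queue: */
	dma = knav_queue_pop(qh, &dma_sz);
	if (dma) {
		desc = knav_pool_desc_unmap(pool, dma, dma_sz);
		knav_pool_desc_put(pool, desc);
	}
	return 0;
}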
969 * knav_pool_count() - Get the number of descriptors in the pool.
970 * @ph: - pool handle
971 * Returns the number of descriptors available in the pool.
975 struct knav_pool *pool = ph; in knav_pool_count() local
976 return knav_queue_get_count(pool->queue); in knav_pool_count()
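/* Usage sketch (editorial): knav_pool_count() reports how many free
 * descriptors sit in the pool's backing queue, e.g. for a watermark check
 * (MY_LOW_WATERMARK is a hypothetical threshold):
 *
 *	if (knav_pool_count(pool) < MY_LOW_WATERMARK)
 *		netif_stop_queue(ndev);
 */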
986 struct knav_pool *pool; in knav_queue_setup_region() local
987 int id = region->id; in knav_queue_setup_region()
991 if (!region->num_desc) { in knav_queue_setup_region()
992 dev_warn(kdev->dev, "unused region %s\n", region->name); in knav_queue_setup_region()
997 hw_num_desc = ilog2(region->num_desc - 1) + 1; in knav_queue_setup_region()
1000 if (region->num_desc < 32) { in knav_queue_setup_region()
1001 region->num_desc = 0; in knav_queue_setup_region()
1002 dev_warn(kdev->dev, "too few descriptors in region %s\n", in knav_queue_setup_region()
1003 region->name); in knav_queue_setup_region()
1007 size = region->num_desc * region->desc_size; in knav_queue_setup_region()
1008 region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA | in knav_queue_setup_region()
1010 if (!region->virt_start) { in knav_queue_setup_region()
1011 region->num_desc = 0; in knav_queue_setup_region()
1012 dev_err(kdev->dev, "memory alloc failed for region %s\n", in knav_queue_setup_region()
1013 region->name); in knav_queue_setup_region()
1016 region->virt_end = region->virt_start + size; in knav_queue_setup_region()
1017 page = virt_to_page(region->virt_start); in knav_queue_setup_region()
1019 region->dma_start = dma_map_page(kdev->dev, page, 0, size, in knav_queue_setup_region()
1021 if (dma_mapping_error(kdev->dev, region->dma_start)) { in knav_queue_setup_region()
1022 dev_err(kdev->dev, "dma map failed for region %s\n", in knav_queue_setup_region()
1023 region->name); in knav_queue_setup_region()
1026 region->dma_end = region->dma_start + size; in knav_queue_setup_region()
1028 pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL); in knav_queue_setup_region()
1029 if (!pool) { in knav_queue_setup_region()
1030 dev_err(kdev->dev, "out of memory allocating dummy pool\n"); in knav_queue_setup_region()
1033 pool->num_desc = 0; in knav_queue_setup_region()
1034 pool->region_offset = region->num_desc; in knav_queue_setup_region()
1035 list_add(&pool->region_inst, &region->pools); in knav_queue_setup_region()
1037 dev_dbg(kdev->dev, in knav_queue_setup_region()
1038 "region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n", in knav_queue_setup_region()
1039 region->name, id, region->desc_size, region->num_desc, in knav_queue_setup_region()
1040 region->link_index, &region->dma_start, &region->dma_end, in knav_queue_setup_region()
1041 region->virt_start, region->virt_end); in knav_queue_setup_region()
1043 hw_desc_size = (region->desc_size / 16) - 1; in knav_queue_setup_region()
1044 hw_num_desc -= 5; in knav_queue_setup_region()
1047 regs = qmgr->reg_region + id; in knav_queue_setup_region()
1048 writel_relaxed((u32)region->dma_start, &regs->base); in knav_queue_setup_region()
1049 writel_relaxed(region->link_index, &regs->start_index); in knav_queue_setup_region()
1051 &regs->size_count); in knav_queue_setup_region()
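/* Worked example (editorial) of the encoding above, for a region of 8192
 * descriptors of 128 bytes each:
 *
 *	hw_num_desc  = ilog2(8192 - 1) + 1 - 5 = 13 - 5 = 8
 *	               (count as a power of two; the minimum of 32 = 2^5
 *	               descriptors is encoded as 0)
 *	hw_desc_size = 128 / 16 - 1 = 7
 *	               (descriptor size in 16-byte units, minus one)
 */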
1056 if (region->dma_start) in knav_queue_setup_region()
1057 dma_unmap_page(kdev->dev, region->dma_start, size, in knav_queue_setup_region()
1059 if (region->virt_start) in knav_queue_setup_region()
1060 free_pages_exact(region->virt_start, size); in knav_queue_setup_region()
1061 region->num_desc = 0; in knav_queue_setup_region()
1070 name = node->name; in knav_queue_find_name()
1079 struct device *dev = kdev->dev; in knav_queue_setup_regions()
1090 return -ENOMEM; in knav_queue_setup_regions()
1093 region->name = knav_queue_find_name(child); in knav_queue_setup_regions()
1094 of_property_read_u32(child, "id", &region->id); in knav_queue_setup_regions()
1095 ret = of_property_read_u32_array(child, "region-spec", temp, 2); in knav_queue_setup_regions()
1097 region->num_desc = temp[0]; in knav_queue_setup_regions()
1098 region->desc_size = temp[1]; in knav_queue_setup_regions()
1100 dev_err(dev, "invalid region info %s\n", region->name); in knav_queue_setup_regions()
1105 if (!of_get_property(child, "link-index", NULL)) { in knav_queue_setup_regions()
1106 dev_err(dev, "No link info for %s\n", region->name); in knav_queue_setup_regions()
1110 ret = of_property_read_u32(child, "link-index", in knav_queue_setup_regions()
1111 &region->link_index); in knav_queue_setup_regions()
1114 region->name); in knav_queue_setup_regions()
1119 INIT_LIST_HEAD(&region->pools); in knav_queue_setup_regions()
1120 list_add_tail(&region->list, &kdev->regions); in knav_queue_setup_regions()
1122 if (list_empty(&kdev->regions)) { in knav_queue_setup_regions()
1124 return -ENODEV; in knav_queue_setup_regions()
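/* Illustrative device-tree node (editorial) for the properties parsed above,
 * following the keystone-navigator-qmss binding; all values are examples
 * only:
 *
 *	descriptor-regions {
 *		region-12 {
 *			id = <12>;
 *			region-spec = <8192 128>;	// num_desc desc_size
 *			link-index = <0x4000>;
 *		};
 *	};
 */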
1138 struct platform_device *pdev = to_platform_device(kdev->dev); in knav_get_link_ram()
1139 struct device_node *node = pdev->dev.of_node; in knav_get_link_ram()
1145 * 64-bit entities here. in knav_get_link_ram()
1147 * For example, to specify the internal link ram for Keystone-I class in knav_get_link_ram()
1148 * devices, we would set the linkram0 resource to 0x80000-0x83fff. in knav_get_link_ram()
1151 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries in knav_get_link_ram()
1152 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000, in knav_get_link_ram()
1153 * which accounts for 64-bits per entry, for 16K entries. in knav_get_link_ram()
1159 * link ram WARNING - we do not "reserve" this block in knav_get_link_ram()
1161 block->dma = (dma_addr_t)temp[0]; in knav_get_link_ram()
1162 block->virt = NULL; in knav_get_link_ram()
1163 block->size = temp[1]; in knav_get_link_ram()
1165 block->size = temp[1]; in knav_get_link_ram()
1167 block->virt = dmam_alloc_coherent(kdev->dev, in knav_get_link_ram()
1168 8 * block->size, &block->dma, in knav_get_link_ram()
1170 if (!block->virt) { in knav_get_link_ram()
1171 dev_err(kdev->dev, "failed to alloc linkram\n"); in knav_get_link_ram()
1172 return -ENOMEM; in knav_get_link_ram()
1176 return -ENODEV; in knav_get_link_ram()
1187 block = &kdev->link_rams[0]; in knav_queue_setup_link_ram()
1188 dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n", in knav_queue_setup_link_ram()
1189 &block->dma, block->virt, block->size); in knav_queue_setup_link_ram()
1190 writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0); in knav_queue_setup_link_ram()
1191 if (kdev->version == QMSS_66AK2G) in knav_queue_setup_link_ram()
1192 writel_relaxed(block->size, in knav_queue_setup_link_ram()
1193 &qmgr->reg_config->link_ram_size0); in knav_queue_setup_link_ram()
1195 writel_relaxed(block->size - 1, in knav_queue_setup_link_ram()
1196 &qmgr->reg_config->link_ram_size0); in knav_queue_setup_link_ram()
1198 if (!block->size) in knav_queue_setup_link_ram()
1201 dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n", in knav_queue_setup_link_ram()
1202 &block->dma, block->virt, block->size); in knav_queue_setup_link_ram()
1203 writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1); in knav_queue_setup_link_ram()
1212 struct device *dev = kdev->dev; in knav_setup_queue_range()
1221 return -ENOMEM; in knav_setup_queue_range()
1224 range->kdev = kdev; in knav_setup_queue_range()
1225 range->name = knav_queue_find_name(node); in knav_setup_queue_range()
1228 range->queue_base = temp[0] - kdev->base_id; in knav_setup_queue_range()
1229 range->num_queues = temp[1]; in knav_setup_queue_range()
1231 dev_err(dev, "invalid queue range %s\n", range->name); in knav_setup_queue_range()
1233 return -EINVAL; in knav_setup_queue_range()
1242 range->irqs[i].irq = irq_create_of_mapping(&oirq); in knav_setup_queue_range()
1243 if (range->irqs[i].irq == IRQ_NONE) in knav_setup_queue_range()
1246 range->num_irqs++; in knav_setup_queue_range()
1252 range->irqs[i].cpu_mask = devm_kzalloc(dev, in knav_setup_queue_range()
1254 if (!range->irqs[i].cpu_mask) in knav_setup_queue_range()
1255 return -ENOMEM; in knav_setup_queue_range()
1259 cpumask_set_cpu(bit, range->irqs[i].cpu_mask); in knav_setup_queue_range()
1263 range->num_irqs = min(range->num_irqs, range->num_queues); in knav_setup_queue_range()
1264 if (range->num_irqs) in knav_setup_queue_range()
1265 range->flags |= RANGE_HAS_IRQ; in knav_setup_queue_range()
1267 if (of_get_property(node, "qalloc-by-id", NULL)) in knav_setup_queue_range()
1268 range->flags |= RANGE_RESERVED; in knav_setup_queue_range()
1277 range->ops = &knav_gp_range_ops; in knav_setup_queue_range()
1282 start = max(qmgr->start_queue, range->queue_base); in knav_setup_queue_range()
1283 end = min(qmgr->start_queue + qmgr->num_queues, in knav_setup_queue_range()
1284 range->queue_base + range->num_queues); in knav_setup_queue_range()
1286 index = id - qmgr->start_queue; in knav_setup_queue_range()
1288 &qmgr->reg_peek[index].ptr_size_thresh); in knav_setup_queue_range()
1290 &qmgr->reg_push[index].ptr_size_thresh); in knav_setup_queue_range()
1294 list_add_tail(&range->list, &kdev->queue_ranges); in knav_setup_queue_range()
1295 dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n", in knav_setup_queue_range()
1296 range->name, range->queue_base, in knav_setup_queue_range()
1297 range->queue_base + range->num_queues - 1, in knav_setup_queue_range()
1298 range->num_irqs, in knav_setup_queue_range()
1299 (range->flags & RANGE_HAS_IRQ) ? ", has irq" : "", in knav_setup_queue_range()
1300 (range->flags & RANGE_RESERVED) ? ", reserved" : "", in knav_setup_queue_range()
1301 (range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : ""); in knav_setup_queue_range()
1302 kdev->num_queues_in_use += range->num_queues; in knav_setup_queue_range()
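/* Illustrative device-tree nodes (editorial) for the range properties parsed
 * above, following the keystone-navigator-qmss binding; queue numbers and
 * the interrupt specifier are examples only:
 *
 *	queue-pools {
 *		qpend {
 *			qpend-0 {
 *				qrange = <658 8>;
 *				interrupts = <0 40 0xf04>;
 *			};
 *		};
 *		general-purpose {
 *			gp-0 {
 *				qrange = <4000 64>;
 *			};
 *		};
 *	};
 */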
1319 if (list_empty(&kdev->queue_ranges)) { in knav_setup_queue_pools()
1320 dev_err(kdev->dev, "no valid queue range found\n"); in knav_setup_queue_pools()
1321 return -ENODEV; in knav_setup_queue_pools()
1329 if (range->ops && range->ops->free_range) in knav_free_queue_range()
1330 range->ops->free_range(range); in knav_free_queue_range()
1331 list_del(&range->list); in knav_free_queue_range()
1332 devm_kfree(kdev->dev, range); in knav_free_queue_range()
1350 struct knav_pool *pool, *tmp; in knav_queue_free_regions() local
1357 list_for_each_entry_safe(pool, tmp, &region->pools, region_inst) in knav_queue_free_regions()
1358 knav_pool_destroy(pool); in knav_queue_free_regions()
1360 size = region->virt_end - region->virt_start; in knav_queue_free_regions()
1362 free_pages_exact(region->virt_start, size); in knav_queue_free_regions()
1363 list_del(&region->list); in knav_queue_free_regions()
1364 devm_kfree(kdev->dev, region); in knav_queue_free_regions()
1377 dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n", in knav_queue_map_reg()
1382 regs = devm_ioremap_resource(kdev->dev, &res); in knav_queue_map_reg()
1384 dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n", in knav_queue_map_reg()
1392 struct device *dev = kdev->dev; in knav_queue_init_qmgrs()
1403 return -ENOMEM; in knav_queue_init_qmgrs()
1406 ret = of_property_read_u32_array(child, "managed-queues", in knav_queue_init_qmgrs()
1409 qmgr->start_queue = temp[0]; in knav_queue_init_qmgrs()
1410 qmgr->num_queues = temp[1]; in knav_queue_init_qmgrs()
1418 qmgr->start_queue, qmgr->num_queues); in knav_queue_init_qmgrs()
1420 qmgr->reg_peek = in knav_queue_init_qmgrs()
1424 if (kdev->version == QMSS) { in knav_queue_init_qmgrs()
1425 qmgr->reg_status = in knav_queue_init_qmgrs()
1430 qmgr->reg_config = in knav_queue_init_qmgrs()
1432 (kdev->version == QMSS_66AK2G) ? in knav_queue_init_qmgrs()
1435 qmgr->reg_region = in knav_queue_init_qmgrs()
1437 (kdev->version == QMSS_66AK2G) ? in knav_queue_init_qmgrs()
1441 qmgr->reg_push = in knav_queue_init_qmgrs()
1443 (kdev->version == QMSS_66AK2G) ? in knav_queue_init_qmgrs()
1447 if (kdev->version == QMSS) { in knav_queue_init_qmgrs()
1448 qmgr->reg_pop = in knav_queue_init_qmgrs()
1453 if (IS_ERR(qmgr->reg_peek) || in knav_queue_init_qmgrs()
1454 ((kdev->version == QMSS) && in knav_queue_init_qmgrs()
1455 (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) || in knav_queue_init_qmgrs()
1456 IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) || in knav_queue_init_qmgrs()
1457 IS_ERR(qmgr->reg_push)) { in knav_queue_init_qmgrs()
1459 if (kdev->version == QMSS) { in knav_queue_init_qmgrs()
1460 if (!IS_ERR(qmgr->reg_status)) in knav_queue_init_qmgrs()
1461 devm_iounmap(dev, qmgr->reg_status); in knav_queue_init_qmgrs()
1462 if (!IS_ERR(qmgr->reg_pop)) in knav_queue_init_qmgrs()
1463 devm_iounmap(dev, qmgr->reg_pop); in knav_queue_init_qmgrs()
1465 if (!IS_ERR(qmgr->reg_peek)) in knav_queue_init_qmgrs()
1466 devm_iounmap(dev, qmgr->reg_peek); in knav_queue_init_qmgrs()
1467 if (!IS_ERR(qmgr->reg_config)) in knav_queue_init_qmgrs()
1468 devm_iounmap(dev, qmgr->reg_config); in knav_queue_init_qmgrs()
1469 if (!IS_ERR(qmgr->reg_region)) in knav_queue_init_qmgrs()
1470 devm_iounmap(dev, qmgr->reg_region); in knav_queue_init_qmgrs()
1471 if (!IS_ERR(qmgr->reg_push)) in knav_queue_init_qmgrs()
1472 devm_iounmap(dev, qmgr->reg_push); in knav_queue_init_qmgrs()
1478 if (kdev->version == QMSS_66AK2G) in knav_queue_init_qmgrs()
1479 qmgr->reg_pop = qmgr->reg_push; in knav_queue_init_qmgrs()
1481 list_add_tail(&qmgr->list, &kdev->qmgrs); in knav_queue_init_qmgrs()
1483 qmgr->start_queue, qmgr->num_queues, in knav_queue_init_qmgrs()
1484 qmgr->reg_peek, qmgr->reg_status, in knav_queue_init_qmgrs()
1485 qmgr->reg_config, qmgr->reg_region, in knav_queue_init_qmgrs()
1486 qmgr->reg_push, qmgr->reg_pop); in knav_queue_init_qmgrs()
1494 struct device *dev = kdev->dev; in knav_queue_init_pdsps()
1503 return -ENOMEM; in knav_queue_init_pdsps()
1505 pdsp->name = knav_queue_find_name(child); in knav_queue_init_pdsps()
1506 pdsp->iram = in knav_queue_init_pdsps()
1509 pdsp->regs = in knav_queue_init_pdsps()
1512 pdsp->intd = in knav_queue_init_pdsps()
1515 pdsp->command = in knav_queue_init_pdsps()
1519 if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) || in knav_queue_init_pdsps()
1520 IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) { in knav_queue_init_pdsps()
1522 pdsp->name); in knav_queue_init_pdsps()
1523 if (!IS_ERR(pdsp->command)) in knav_queue_init_pdsps()
1524 devm_iounmap(dev, pdsp->command); in knav_queue_init_pdsps()
1525 if (!IS_ERR(pdsp->iram)) in knav_queue_init_pdsps()
1526 devm_iounmap(dev, pdsp->iram); in knav_queue_init_pdsps()
1527 if (!IS_ERR(pdsp->regs)) in knav_queue_init_pdsps()
1528 devm_iounmap(dev, pdsp->regs); in knav_queue_init_pdsps()
1529 if (!IS_ERR(pdsp->intd)) in knav_queue_init_pdsps()
1530 devm_iounmap(dev, pdsp->intd); in knav_queue_init_pdsps()
1534 of_property_read_u32(child, "id", &pdsp->id); in knav_queue_init_pdsps()
1535 list_add_tail(&pdsp->list, &kdev->pdsps); in knav_queue_init_pdsps()
1537 pdsp->name, pdsp->command, pdsp->iram, pdsp->regs, in knav_queue_init_pdsps()
1538 pdsp->intd); in knav_queue_init_pdsps()
1549 val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE; in knav_queue_stop_pdsp()
1550 writel_relaxed(val, &pdsp->regs->control); in knav_queue_stop_pdsp()
1551 ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout, in knav_queue_stop_pdsp()
1554 dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name); in knav_queue_stop_pdsp()
1557 pdsp->loaded = false; in knav_queue_stop_pdsp()
1558 pdsp->started = false; in knav_queue_stop_pdsp()
1574 kdev->dev); in knav_queue_load_pdsp()
1583 dev_err(kdev->dev, "failed to get firmware for pdsp\n"); in knav_queue_load_pdsp()
1584 return -ENODEV; in knav_queue_load_pdsp()
1587 dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n", in knav_queue_load_pdsp()
1590 writel_relaxed(pdsp->id + 1, pdsp->command + 0x18); in knav_queue_load_pdsp()
1592 fwdata = (u32 *)fw->data; in knav_queue_load_pdsp()
1593 fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32); in knav_queue_load_pdsp()
1595 writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i); in knav_queue_load_pdsp()
1608 writel_relaxed(0xffffffff, pdsp->command); in knav_queue_start_pdsp()
1609 while (readl_relaxed(pdsp->command) != 0xffffffff) in knav_queue_start_pdsp()
1613 val = readl_relaxed(&pdsp->regs->control); in knav_queue_start_pdsp()
1615 writel_relaxed(val, &pdsp->regs->control); in knav_queue_start_pdsp()
1618 val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE; in knav_queue_start_pdsp()
1619 writel_relaxed(val, &pdsp->regs->control); in knav_queue_start_pdsp()
1622 ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0); in knav_queue_start_pdsp()
1624 dev_err(kdev->dev, in knav_queue_start_pdsp()
1626 pdsp->name); in knav_queue_start_pdsp()
1656 pdsp->loaded = true; in knav_queue_start_pdsps()
1660 if (pdsp->loaded) { in knav_queue_start_pdsps()
1663 pdsp->started = true; in knav_queue_start_pdsps()
1674 if ((id >= qmgr->start_queue) && in knav_find_qmgr()
1675 (id < qmgr->start_queue + qmgr->num_queues)) in knav_find_qmgr()
1687 inst->qmgr = knav_find_qmgr(id); in knav_queue_init_queue()
1688 if (!inst->qmgr) in knav_queue_init_queue()
1689 return -1; in knav_queue_init_queue()
1691 INIT_LIST_HEAD(&inst->handles); in knav_queue_init_queue()
1692 inst->kdev = kdev; in knav_queue_init_queue()
1693 inst->range = range; in knav_queue_init_queue()
1694 inst->irq_num = -1; in knav_queue_init_queue()
1695 inst->id = id; in knav_queue_init_queue()
1696 scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id); in knav_queue_init_queue()
1697 inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL); in knav_queue_init_queue()
1699 if (range->ops && range->ops->init_queue) in knav_queue_init_queue()
1700 return range->ops->init_queue(range, inst); in knav_queue_init_queue()
1717 kdev->inst_shift = order_base_2(size); in knav_queue_init_queues()
1718 size = (1 << kdev->inst_shift) * kdev->num_queues_in_use; in knav_queue_init_queues()
1719 kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL); in knav_queue_init_queues()
1720 if (!kdev->instances) in knav_queue_init_queues()
1721 return -ENOMEM; in knav_queue_init_queues()
1724 if (range->ops && range->ops->init_range) in knav_queue_init_queues()
1725 range->ops->init_range(range); in knav_queue_init_queues()
1727 for (id = range->queue_base; in knav_queue_init_queues()
1728 id < range->queue_base + range->num_queues; id++, idx++) { in knav_queue_init_queues()
1734 range->queue_base_inst = in knav_queue_init_queues()
1743 .compatible = "ti,keystone-navigator-qmss",
1746 .compatible = "ti,66ak2g-navss-qm",
1755 struct device_node *node = pdev->dev.of_node; in knav_queue_probe()
1758 struct device *dev = &pdev->dev; in knav_queue_probe()
1764 return -ENODEV; in knav_queue_probe()
1770 return -ENOMEM; in knav_queue_probe()
1774 if (match && match->data) in knav_queue_probe()
1775 kdev->version = QMSS_66AK2G; in knav_queue_probe()
1778 kdev->dev = dev; in knav_queue_probe()
1779 INIT_LIST_HEAD(&kdev->queue_ranges); in knav_queue_probe()
1780 INIT_LIST_HEAD(&kdev->qmgrs); in knav_queue_probe()
1781 INIT_LIST_HEAD(&kdev->pools); in knav_queue_probe()
1782 INIT_LIST_HEAD(&kdev->regions); in knav_queue_probe()
1783 INIT_LIST_HEAD(&kdev->pdsps); in knav_queue_probe()
1785 pm_runtime_enable(&pdev->dev); in knav_queue_probe()
1786 ret = pm_runtime_resume_and_get(&pdev->dev); in knav_queue_probe()
1792 if (of_property_read_u32_array(node, "queue-range", temp, 2)) { in knav_queue_probe()
1793 dev_err(dev, "queue-range not specified\n"); in knav_queue_probe()
1794 ret = -ENODEV; in knav_queue_probe()
1797 kdev->base_id = temp[0]; in knav_queue_probe()
1798 kdev->num_queues = temp[1]; in knav_queue_probe()
1804 ret = -ENODEV; in knav_queue_probe()
1826 queue_pools = of_get_child_by_name(node, "queue-pools"); in knav_queue_probe()
1828 dev_err(dev, "queue-pools not specified\n"); in knav_queue_probe()
1829 ret = -ENODEV; in knav_queue_probe()
1837 ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]); in knav_queue_probe()
1839 dev_err(kdev->dev, "could not setup linking ram\n"); in knav_queue_probe()
1843 ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]); in knav_queue_probe()
1855 regions = of_get_child_by_name(node, "descriptor-regions"); in knav_queue_probe()
1857 dev_err(dev, "descriptor-regions not specified\n"); in knav_queue_probe()
1858 ret = -ENODEV; in knav_queue_probe()
1881 pm_runtime_put_sync(&pdev->dev); in knav_queue_probe()
1882 pm_runtime_disable(&pdev->dev); in knav_queue_probe()
1889 pm_runtime_put_sync(&pdev->dev); in knav_queue_remove()
1890 pm_runtime_disable(&pdev->dev); in knav_queue_remove()
1898 .name = "keystone-navigator-qmss",