Lines Matching refs:pthr
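(The line numbers and function names in these hits match the NTB performance test driver, drivers/ntb/test/ntb_perf.c in the Linux kernel. Code sketches below each group of hits are hedged reconstructions from the matched lines, not verbatim source.)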

778 struct perf_thread *pthr = data; in perf_dma_copy_callback() local
780 atomic_dec(&pthr->dma_sync); in perf_dma_copy_callback()
781 wake_up(&pthr->dma_wait); in perf_dma_copy_callback()
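
Taken together, the three hits above are the entire DMA completion callback: each finished transaction drops the thread's in-flight count and pokes the wait queue that perf_sync_test() sleeps on. A minimal reconstruction (the void-pointer signature is inferred from its use as a dmaengine callback):

static void perf_dma_copy_callback(void *data)
{
	struct perf_thread *pthr = data;

	/* One outstanding transaction done; perf_copy_chunk() did the inc. */
	atomic_dec(&pthr->dma_sync);
	/* Let perf_sync_test()'s wait_event() re-evaluate its condition. */
	wake_up(&pthr->dma_wait);
}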
784 static int perf_copy_chunk(struct perf_thread *pthr, in perf_copy_chunk() argument
791 struct perf_peer *peer = pthr->perf->test_peer; in perf_copy_chunk()
801 dma_dev = pthr->dma_chan->device->dev; in perf_copy_chunk()
803 if (!is_dma_copy_aligned(pthr->dma_chan->device, offset_in_page(src), in perf_copy_chunk()
825 tx = dmaengine_prep_dma_memcpy(pthr->dma_chan, dst_dma_addr, in perf_copy_chunk()
837 tx->callback_param = pthr; in perf_copy_chunk()
848 atomic_inc(&pthr->dma_sync); in perf_copy_chunk()
849 dma_async_issue_pending(pthr->dma_chan); in perf_copy_chunk()
852 return likely(atomic_read(&pthr->perf->tsync) > 0) ? 0 : -EINTR; in perf_copy_chunk()
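
perf_copy_chunk() is the workhorse: it checks alignment against the engine's constraints, maps the source, preps a memcpy descriptor on pthr->dma_chan with the callback above attached, and issues it. The sketch below is stitched from the matched lines; the dst_dma_addr arithmetic is implied by the dma_map_resource() mapping done in perf_init_test(), and dma_map_single() stands in for the driver's dmaengine unmap-data bookkeeping (the completion-side unmap is omitted here), so treat the details as assumptions:

static int perf_copy_chunk(struct perf_thread *pthr,
			   void __iomem *dst, void *src, size_t len)
{
	struct perf_peer *peer = pthr->perf->test_peer;
	struct device *dma_dev = pthr->dma_chan->device->dev;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dst_dma_addr, src_dma_addr;

	if (!is_dma_copy_aligned(pthr->dma_chan->device, offset_in_page(src),
				 offset_in_page(dst), len))
		return -EIO;

	/* Offset into the peer MMIO window premapped by perf_init_test(). */
	dst_dma_addr = peer->dma_dst_addr + (dst - peer->outbuf);

	src_dma_addr = dma_map_single(dma_dev, src, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, src_dma_addr))
		return -EIO;

	tx = dmaengine_prep_dma_memcpy(pthr->dma_chan, dst_dma_addr,
				       src_dma_addr, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_unmap_single(dma_dev, src_dma_addr, len, DMA_TO_DEVICE);
		return -EIO;
	}

	tx->callback = perf_dma_copy_callback;
	tx->callback_param = pthr;

	if (dma_submit_error(dmaengine_submit(tx))) {
		dma_unmap_single(dma_dev, src_dma_addr, len, DMA_TO_DEVICE);
		return -EIO;
	}

	/* Count the transaction before kicking the channel. */
	atomic_inc(&pthr->dma_sync);
	dma_async_issue_pending(pthr->dma_chan);

	/* tsync dropping to zero or below means the test was interrupted. */
	return likely(atomic_read(&pthr->perf->tsync) > 0) ? 0 : -EINTR;
}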
870 static int perf_init_test(struct perf_thread *pthr) in perf_init_test() argument
872 struct perf_ctx *perf = pthr->perf; in perf_init_test()
874 struct perf_peer *peer = pthr->perf->test_peer; in perf_init_test()
876 pthr->src = kmalloc_node(perf->test_peer->outbuf_size, GFP_KERNEL, in perf_init_test()
878 if (!pthr->src) in perf_init_test()
881 get_random_bytes(pthr->src, perf->test_peer->outbuf_size); in perf_init_test()
888 pthr->dma_chan = dma_request_channel(dma_mask, perf_dma_filter, perf); in perf_init_test()
889 if (!pthr->dma_chan) { in perf_init_test()
891 pthr->tidx); in perf_init_test()
895 dma_map_resource(pthr->dma_chan->device->dev, in perf_init_test()
898 if (dma_mapping_error(pthr->dma_chan->device->dev, in perf_init_test()
900 dev_err(pthr->dma_chan->device->dev, "%d: Failed to map DMA addr\n", in perf_init_test()
901 pthr->tidx); in perf_init_test()
903 dma_release_channel(pthr->dma_chan); in perf_init_test()
906 dev_dbg(pthr->dma_chan->device->dev, "%d: Map MMIO %pa to DMA addr %pad\n", in perf_init_test()
907 pthr->tidx, in perf_init_test()
911 atomic_set(&pthr->dma_sync, 0); in perf_init_test()
917 kfree(pthr->src); in perf_init_test()
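
perf_init_test() allocates and randomizes the per-thread source buffer, requests a DMA_MEMCPY-capable channel, and maps the peer's MMIO window for device access. A sketch of that flow under the matched lines (the dma_cap mask setup, the out_phys_addr field name, and the exact error unwinding are assumptions):

static int perf_init_test(struct perf_thread *pthr)
{
	struct perf_ctx *perf = pthr->perf;
	struct perf_peer *peer = perf->test_peer;
	dma_cap_mask_t dma_mask;

	/* NUMA-local source buffer, filled with a random payload. */
	pthr->src = kmalloc_node(peer->outbuf_size, GFP_KERNEL,
				 dev_to_node(&perf->ntb->dev));
	if (!pthr->src)
		return -ENOMEM;
	get_random_bytes(pthr->src, peer->outbuf_size);

	dma_cap_zero(dma_mask);
	dma_cap_set(DMA_MEMCPY, dma_mask);
	pthr->dma_chan = dma_request_channel(dma_mask, perf_dma_filter, perf);
	if (!pthr->dma_chan) {
		dev_err(&perf->ntb->dev, "%d: Failed to get DMA channel\n",
			pthr->tidx);
		goto err_free;
	}

	/* Map the peer MMIO window so the engine can write to it directly. */
	peer->dma_dst_addr =
		dma_map_resource(pthr->dma_chan->device->dev,
				 peer->out_phys_addr, peer->outbuf_size,
				 DMA_FROM_DEVICE, 0);
	if (dma_mapping_error(pthr->dma_chan->device->dev,
			      peer->dma_dst_addr)) {
		dev_err(pthr->dma_chan->device->dev,
			"%d: Failed to map DMA addr\n", pthr->tidx);
		dma_release_channel(pthr->dma_chan);
		goto err_free;
	}
	dev_dbg(pthr->dma_chan->device->dev, "%d: Map MMIO %pa to DMA addr %pad\n",
		pthr->tidx, &peer->out_phys_addr, &peer->dma_dst_addr);

	atomic_set(&pthr->dma_sync, 0);
	return 0;

err_free:
	kfree(pthr->src);
	return -ENODEV;
}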
921 static int perf_run_test(struct perf_thread *pthr) in perf_run_test() argument
923 struct perf_peer *peer = pthr->perf->test_peer; in perf_run_test()
924 struct perf_ctx *perf = pthr->perf; in perf_run_test()
934 flt_src = pthr->src; in perf_run_test()
938 pthr->duration = ktime_get(); in perf_run_test()
941 while (pthr->copied < total_size) { in perf_run_test()
942 ret = perf_copy_chunk(pthr, flt_dst, flt_src, chunk_size); in perf_run_test()
945 pthr->tidx, ret); in perf_run_test()
949 pthr->copied += chunk_size; in perf_run_test()
955 flt_src = pthr->src; in perf_run_test()
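
perf_run_test() opens the timing window with ktime_get(), then streams chunk_size pieces through perf_copy_chunk() until total_size bytes have been copied, rewinding the flat source/destination cursors whenever they reach the end of the window. The two flt_src assignments in the hits mark the initial set and the rewind; the size parameters and the wrap test below are assumptions:

static int perf_run_test(struct perf_thread *pthr)
{
	struct perf_peer *peer = pthr->perf->test_peer;
	struct perf_ctx *perf = pthr->perf;
	u64 total_size = 1ULL << total_order;	/* assumed module parameter */
	u64 chunk_size = 1ULL << chunk_order;	/* assumed module parameter */
	void __iomem *flt_dst, *bnd_dst;
	void *flt_src;
	int ret;

	chunk_size = min_t(u64, peer->outbuf_size, chunk_size);

	flt_src = pthr->src;
	bnd_dst = peer->outbuf + peer->outbuf_size;
	flt_dst = peer->outbuf;

	pthr->duration = ktime_get();

	while (pthr->copied < total_size) {
		ret = perf_copy_chunk(pthr, flt_dst, flt_src, chunk_size);
		if (ret) {
			dev_err(&perf->ntb->dev, "%d: Got error %d on test\n",
				pthr->tidx, ret);
			return ret;
		}

		pthr->copied += chunk_size;

		flt_dst += chunk_size;
		flt_src += chunk_size;
		if (flt_dst >= bnd_dst || flt_dst < peer->outbuf) {
			flt_dst = peer->outbuf;
			flt_src = pthr->src;
		}

		/* Yield the CPU between chunks. */
		schedule();
	}

	return 0;
}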
965 static int perf_sync_test(struct perf_thread *pthr) in perf_sync_test() argument
967 struct perf_ctx *perf = pthr->perf; in perf_sync_test()
972 wait_event(pthr->dma_wait, in perf_sync_test()
973 (atomic_read(&pthr->dma_sync) == 0 || in perf_sync_test()
980 pthr->duration = ktime_sub(ktime_get(), pthr->duration); in perf_sync_test()
983 pthr->tidx, pthr->copied); in perf_sync_test()
986 pthr->tidx, ktime_to_us(pthr->duration)); in perf_sync_test()
988 dev_dbg(&perf->ntb->dev, "%d: %llu MBytes/s\n", pthr->tidx, in perf_sync_test()
989 div64_u64(pthr->copied, ktime_to_us(pthr->duration))); in perf_sync_test()
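
perf_sync_test() is where the dma_sync counter and dma_wait queue from the callback pay off: the thread sleeps until every outstanding descriptor has completed (or the test is interrupted), and only then closes the timing window and logs the results. The reconstruction is nearly complete from the hits alone; only the interrupt check is inferred:

static int perf_sync_test(struct perf_thread *pthr)
{
	struct perf_ctx *perf = pthr->perf;

	/* Wait until every submitted descriptor has called back ... */
	wait_event(pthr->dma_wait,
		   (atomic_read(&pthr->dma_sync) == 0 ||
		    atomic_read(&perf->tsync) < 0));

	if (atomic_read(&perf->tsync) < 0)
		return -EINTR;

	/* ... and only then close the timing window. */
	pthr->duration = ktime_sub(ktime_get(), pthr->duration);

	dev_dbg(&perf->ntb->dev, "%d: copied %llu bytes\n",
		pthr->tidx, pthr->copied);
	dev_dbg(&perf->ntb->dev, "%d: lasted %llu usecs\n",
		pthr->tidx, ktime_to_us(pthr->duration));
	dev_dbg(&perf->ntb->dev, "%d: %llu MBytes/s\n", pthr->tidx,
		div64_u64(pthr->copied, ktime_to_us(pthr->duration)));

	return 0;
}

Since one megabyte is 10^6 bytes and one second is 10^6 microseconds, bytes-per-microsecond is numerically MBytes/s, which is why the div64_u64() needs no extra scaling factor.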
994 static void perf_clear_test(struct perf_thread *pthr) in perf_clear_test() argument
996 struct perf_ctx *perf = pthr->perf; in perf_clear_test()
1005 (void)dmaengine_terminate_sync(pthr->dma_chan); in perf_clear_test()
1006 if (pthr->perf->test_peer->dma_dst_addr) in perf_clear_test()
1007 dma_unmap_resource(pthr->dma_chan->device->dev, in perf_clear_test()
1008 pthr->perf->test_peer->dma_dst_addr, in perf_clear_test()
1009 pthr->perf->test_peer->outbuf_size, in perf_clear_test()
1012 dma_release_channel(pthr->dma_chan); in perf_clear_test()
1017 kfree(pthr->src); in perf_clear_test()
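
perf_clear_test() is the inverse of perf_init_test(): force-terminate whatever is still queued on the channel (harmless if the test drained cleanly), unmap the peer window if it was mapped, release the channel, and free the source buffer. Sketch (any non-DMA early path or thread-count notification between the hits is assumed away):

static void perf_clear_test(struct perf_thread *pthr)
{
	struct perf_ctx *perf = pthr->perf;
	struct perf_peer *peer = perf->test_peer;

	/* Terminate even on success, just to be sure the channel is idle. */
	(void)dmaengine_terminate_sync(pthr->dma_chan);

	if (peer->dma_dst_addr)
		dma_unmap_resource(pthr->dma_chan->device->dev,
				   peer->dma_dst_addr, peer->outbuf_size,
				   DMA_FROM_DEVICE, 0);

	dma_release_channel(pthr->dma_chan);

	kfree(pthr->src);
}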
1022 struct perf_thread *pthr = to_thread_work(work); in perf_thread_work() local
1032 ret = perf_init_test(pthr); in perf_thread_work()
1034 pthr->status = ret; in perf_thread_work()
1038 ret = perf_run_test(pthr); in perf_thread_work()
1040 pthr->status = ret; in perf_thread_work()
1044 pthr->status = perf_sync_test(pthr); in perf_thread_work()
1047 perf_clear_test(pthr); in perf_thread_work()
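
perf_thread_work() strings the four stages together and is the routine that INIT_WORK() registers in perf_init_threads() below. Its control flow is fully visible in the hits: an init failure aborts outright, a run failure still falls through to cleanup, and the sync result becomes the final status:

static void perf_thread_work(struct work_struct *work)
{
	struct perf_thread *pthr = to_thread_work(work);
	int ret;

	ret = perf_init_test(pthr);
	if (ret) {
		pthr->status = ret;
		return;
	}

	ret = perf_run_test(pthr);
	if (ret) {
		pthr->status = ret;
		goto err_clear_test;
	}

	pthr->status = perf_sync_test(pthr);

err_clear_test:
	perf_clear_test(pthr);
}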
1081 struct perf_thread *pthr; in perf_submit_test() local
1095 pthr = &perf->threads[tidx]; in perf_submit_test()
1097 pthr->status = -ENODATA; in perf_submit_test()
1098 pthr->copied = 0; in perf_submit_test()
1099 pthr->duration = ktime_set(0, 0); in perf_submit_test()
1101 (void)queue_work(perf_wq, &pthr->work); in perf_submit_test()
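
perf_submit_test() arms each thread: status is parked at -ENODATA (the "no result yet" sentinel that perf_read_stats() skips), the counters are zeroed, and the work item is queued on perf_wq. A sketch of the arming loop (the signature, the MAX_THREADS_CNT bound, and the tcnt gating are assumptions beyond the matched lines):

static int perf_submit_test(struct perf_peer *peer)
{
	struct perf_ctx *perf = peer->perf;
	struct perf_thread *pthr;
	int tidx;

	perf->test_peer = peer;

	for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
		pthr = &perf->threads[tidx];

		pthr->status = -ENODATA;
		pthr->copied = 0;
		pthr->duration = ktime_set(0, 0);
		if (tidx < perf->tcnt)
			(void)queue_work(perf_wq, &pthr->work);
	}

	return 0;
}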
1119 struct perf_thread *pthr; in perf_read_stats() local
1129 pthr = &perf->threads[tidx]; in perf_read_stats()
1131 if (pthr->status == -ENODATA) in perf_read_stats()
1134 if (pthr->status) { in perf_read_stats()
1136 "%d: error status %d\n", tidx, pthr->status); in perf_read_stats()
1142 tidx, pthr->copied, ktime_to_us(pthr->duration), in perf_read_stats()
1143 div64_u64(pthr->copied, ktime_to_us(pthr->duration))); in perf_read_stats()
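
perf_read_stats() renders the results: threads still holding the -ENODATA sentinel never ran and are skipped, failed threads report their status, and successful ones get the same bytes/usecs/MBytes-per-second triple that perf_sync_test() logged. Sketch of the reporting loop (the scnprintf-into-caller-buffer convention is an assumption):

static void perf_read_stats(struct perf_ctx *perf, char *buf,
			    size_t size, ssize_t *pos)
{
	struct perf_thread *pthr;
	int tidx;

	for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
		pthr = &perf->threads[tidx];

		if (pthr->status == -ENODATA)
			continue;

		if (pthr->status) {
			(*pos) += scnprintf(buf + *pos, size - *pos,
					    "%d: error status %d\n",
					    tidx, pthr->status);
			continue;
		}

		(*pos) += scnprintf(buf + *pos, size - *pos,
				    "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
				    tidx, pthr->copied, ktime_to_us(pthr->duration),
				    div64_u64(pthr->copied,
					      ktime_to_us(pthr->duration)));
	}
}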
1153 struct perf_thread *pthr; in perf_init_threads() local
1161 pthr = &perf->threads[tidx]; in perf_init_threads()
1163 pthr->perf = perf; in perf_init_threads()
1164 pthr->tidx = tidx; in perf_init_threads()
1165 pthr->status = -ENODATA; in perf_init_threads()
1166 init_waitqueue_head(&pthr->dma_wait); in perf_init_threads()
1167 INIT_WORK(&pthr->work, perf_thread_work); in perf_init_threads()
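
perf_init_threads() runs once at setup and seeds every slot in the thread array; note that status starts at -ENODATA even before any test is submitted, so a stats read before the first run reports nothing. Sketch (MAX_THREADS_CNT and the return convention are assumptions):

static int perf_init_threads(struct perf_ctx *perf)
{
	struct perf_thread *pthr;
	int tidx;

	for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
		pthr = &perf->threads[tidx];

		pthr->perf = perf;
		pthr->tidx = tidx;
		pthr->status = -ENODATA;
		init_waitqueue_head(&pthr->dma_wait);
		INIT_WORK(&pthr->work, perf_thread_work);
	}

	return 0;
}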