/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <trace/events/dma_fence.h>

#include "virtgpu_drv.h"

static const char *virtio_get_driver_name(struct dma_fence *f)
{
	return "virtio_gpu";
}

static const char *virtio_get_timeline_name(struct dma_fence *f)
{
	return "controlq";
}

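/*
 * A fence counts as signaled once the driver's last completed sequence
 * number has caught up with (or passed) the fence's own seqno.
 */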
bool virtio_fence_signaled(struct dma_fence *f)
{
	struct virtio_gpu_fence *fence = to_virtio_fence(f);

	if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
		return true;
	return false;
}

static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
{
	snprintf(str, size, "%llu", f->seqno);
}

static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
{
	struct virtio_gpu_fence *fence = to_virtio_fence(f);

	snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
}

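/* Fence ops shared by every fence on the virtio-gpu "controlq" timeline. */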
static const struct dma_fence_ops virtio_fence_ops = {
	.get_driver_name = virtio_get_driver_name,
	.get_timeline_name = virtio_get_timeline_name,
	.signaled = virtio_fence_signaled,
	.fence_value_str = virtio_fence_value_str,
	.timeline_value_str = virtio_timeline_value_str,
};

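/*
 * Allocate a fence on the device's fence context. The seqno is filled in
 * later by virtio_gpu_fence_emit().
 */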
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
						 GFP_KERNEL);
	if (!fence)
		return fence;

	fence->drv = drv;

	/* This only partially initializes the fence because the seqno is
	 * not yet known. The fence must not be used outside of the driver
	 * until virtio_gpu_fence_emit is called.
	 */
	dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);

	return fence;
}

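/*
 * Assign the next sequence number under the driver lock, queue the fence on
 * the driver's pending list, and tag the command header so the host reports
 * completion with this fence id.
 */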
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_ctrl_hdr *cmd_hdr,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	unsigned long irq_flags;

	spin_lock_irqsave(&drv->lock, irq_flags);
	fence->f.seqno = ++drv->sync_seq;
	dma_fence_get(&fence->f);
	list_add_tail(&fence->node, &drv->fences);
	spin_unlock_irqrestore(&drv->lock, irq_flags);

	trace_dma_fence_emit(&fence->f);

	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
	cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);
}

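/*
 * Called from the control-queue completion path with the highest fence id the
 * host has finished. Signals and drops every pending fence whose seqno is at
 * or below that value.
 */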
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
				    u64 last_seq)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	struct virtio_gpu_fence *fence, *tmp;
	unsigned long irq_flags;

	spin_lock_irqsave(&drv->lock, irq_flags);
	atomic64_set(&vgdev->fence_drv.last_seq, last_seq);
	list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
		if (last_seq < fence->f.seqno)
			continue;
		dma_fence_signal_locked(&fence->f);
		list_del(&fence->node);
		dma_fence_put(&fence->f);
	}
	spin_unlock_irqrestore(&drv->lock, irq_flags);
}