/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		  \
			       + MAX_INLINE_RESP_SIZE)

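/* Convert a drm_virtgpu_3d_box into the little-endian wire format. */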
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

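/*
 * Virtqueue callbacks.  These run in virtio interrupt context, so they
 * only kick the matching dequeue worker and return.
 */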
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

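/*
 * Allocate a vbuffer from the slab cache.  Commands are always stored
 * inline (each cache entry reserves MAX_INLINE_CMD_SIZE bytes for them);
 * responses use the inline space when they fit, otherwise the caller
 * must pass in a separately allocated resp_buf.
 */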
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev,
				   sizeof(struct virtio_gpu_update_cursor),
				   0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("zero vbufs reclaimed\n");
}

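/*
 * Workqueue handler for the control queue: drain completed buffers from
 * the virtqueue, log error responses, advance the fence id, run response
 * callbacks and finally free the vbuffers.
 */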
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;

				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sgtable_sg(sgt, sg, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

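/*
 * Add a prepared scatterlist array to the control virtqueue.  If the
 * ring is full, kick the host and sleep until enough descriptors have
 * been reclaimed.  The fence is emitted under the queue lock, once the
 * command's position in the queue (and thus its fence id) is fixed.
 */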
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf,
				     struct virtio_gpu_fence *fence,
				     int elemcnt,
				     struct scatterlist **sgs,
				     int outcnt,
				     int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return -1;
	}

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);

	drm_dev_exit(idx);
	return 0;
}

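/*
 * Build the scatterlists for one command: the command itself (out),
 * optional extra data (out, possibly a multi-entry table for vmalloc'd
 * buffers) and the optional response (in), then hand them to
 * virtio_gpu_queue_ctrl_sgs().
 */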
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0, ret;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;

			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return -1;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs,
					outcnt, incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
	return ret;
}

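/*
 * Notify the host about commands queued on the control queue that it
 * has not been kicked for yet.  Callers batch several commands before
 * calling this, so the host is not notified once per command.
 */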
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

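/*
 * Queue a command on the cursor virtqueue.  Cursor updates are never
 * fenced; if the ring is full we simply wait for space and retry.
 */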
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
					   virtio_gpu_vbuf_ctrl_hdr(vbuf));

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}

/* TODO: create gem objects only for userspace-visible, long-lived
 * objects, and use dma_alloc'ed pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

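/*
 * Resource teardown is asynchronous: the unref command is queued with a
 * response callback, and the backing object is only cleaned up once the
 * host has acknowledged that it no longer uses the resource.
 */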
static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int ret;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	if (ret < 0)
		virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height,
				   struct virtio_gpu_object_array *objs,
				   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

	if (use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    shmem->pages, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

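/*
 * Response callback for GET_DISPLAY_INFO: cache the per-scanout modes
 * and report a hotplug event so userspace re-probes the outputs.
 */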
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d\n", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled\n", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory\n");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

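/*
 * Block-read callback for drm_do_get_edid(): copy one EDID block out of
 * the response buffer the host filled in.
 */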
static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp(vgdev,
					  &virtio_gpu_cmd_get_display_info_cb,
					  &vbuf, sizeof(*cmd_p),
					  sizeof(struct virtio_gpu_resp_display_info),
					  resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp(vgdev,
					  &virtio_gpu_cmd_get_capset_info_cb,
					  &vbuf, sizeof(*cmd_p),
					  sizeof(struct virtio_gpu_resp_capset_info),
					  resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp(vgdev, &virtio_gpu_cmd_capset_cb,
					  &vbuf, sizeof(*cmd_p),
					  sizeof(struct virtio_gpu_resp_capset) +
					  max_size,
					  resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp(vgdev,
						  &virtio_gpu_cmd_get_edid_cb,
						  &vbuf, sizeof(*cmd_p),
						  sizeof(struct virtio_gpu_resp_edid),
						  resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					uint32_t stride,
					uint32_t layer_stride,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    shmem->pages, DMA_TO_DEVICE);
	}

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  uint32_t stride,
					  uint32_t layer_stride,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_mem_entry *ents,
			      unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
					    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *obj =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_resource_uuid *resp =
		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->resource_export_lock);
	WARN_ON(obj->uuid_state != STATE_INITIALIZING);

	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
	    obj->uuid_state == STATE_INITIALIZING) {
		import_uuid(&obj->uuid, resp->uuid);
		obj->uuid_state = STATE_OK;
	} else {
		obj->uuid_state = STATE_ERR;
	}
	spin_unlock(&vgdev->resource_export_lock);

	wake_up_all(&vgdev->resp_wq);
}

int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_resource_assign_uuid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_resource_uuid *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf) {
		spin_lock(&vgdev->resource_export_lock);
		bo->uuid_state = STATE_ERR;
		spin_unlock(&vgdev->resource_export_lock);
		virtio_gpu_array_put_free(objs);
		return -ENOMEM;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp(vgdev,
					  virtio_gpu_cmd_resource_uuid_cb,
					  &vbuf, sizeof(*cmd_p),
					  sizeof(struct virtio_gpu_resp_resource_uuid),
					  resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->objs = objs;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

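/*
 * Response callback for RESOURCE_MAP_BLOB: store the host's mapping
 * info under host_visible_lock and wake up anyone waiting for it.
 */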
static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_map_info *resp =
		(struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->host_visible_lock);

	if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
		vram->map_info = resp->map_info;
		vram->map_state = STATE_OK;
	} else {
		vram->map_state = STATE_ERR;
	}

	spin_unlock(&vgdev->host_visible_lock);
	wake_up_all(&vgdev->resp_wq);
}

int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_object_array *objs, uint64_t offset)
{
	struct virtio_gpu_resource_map_blob *cmd_p;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_map_info *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp(vgdev,
					  virtio_gpu_cmd_resource_map_cb,
					  &vbuf, sizeof(*cmd_p),
					  sizeof(struct virtio_gpu_resp_map_info),
					  resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	vbuf->objs = objs;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
			  struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unmap_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_mem_entry *ents,
				    uint32_t nents)
{
	struct virtio_gpu_resource_create_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
	cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
	cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
	cmd_p->blob_id = cpu_to_le64(params->blob_id);
	cmd_p->size = cpu_to_le64(params->size);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	bo->created = true;
}

void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
				     uint32_t scanout_id,
				     struct virtio_gpu_object *bo,
				     struct drm_framebuffer *fb,
				     uint32_t width, uint32_t height,
				     uint32_t x, uint32_t y)
{
	uint32_t i;
	struct virtio_gpu_set_scanout_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	uint32_t format = virtio_gpu_translate_format(fb->format->format);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);

	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(fb->width);
	cmd_p->height = cpu_to_le32(fb->height);

	for (i = 0; i < 4; i++) {
		cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
		cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
	}

	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}