/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
				    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
				    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)

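/*
 * Set up a DRM event that is delivered to userspace when the fence
 * signals, but only if the context opted in to polling this ring via
 * VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK.
 */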
static int virtio_gpu_fence_event_create(struct drm_device *dev,
					 struct drm_file *file,
					 struct virtio_gpu_fence *fence,
					 uint32_t ring_idx)
{
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence_event *e = NULL;
	int ret;

	if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
		return 0;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
	e->event.length = sizeof(e->event);

	ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
	if (ret)
		goto free;

	fence->e = e;
	return 0;
free:
	kfree(e);
	return ret;
}

/* Must be called with &virtio_gpu_fpriv.context_lock held. */
static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
					     struct virtio_gpu_fpriv *vfpriv)
{
	char dbgname[TASK_COMM_LEN];

	get_task_comm(dbgname, current);
	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
				      vfpriv->context_init, strlen(dbgname),
				      dbgname);

	vfpriv->context_created = true;
}

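/*
 * Create the host rendering context for this DRM file on first use;
 * subsequent calls are no-ops.
 */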
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created)
		goto out_unlock;

	virtio_gpu_create_context_locked(vgdev, vfpriv);

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
}

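/* Look up a dumb buffer by handle and return its fake mmap offset. */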
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;
	uint64_t fence_ctx;
	uint32_t ring_idx;

	fence_ctx = vgdev->fence_drv.context;
	ring_idx = 0;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
		return -EINVAL;

	if (exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) {
		if (exbuf->ring_idx >= vfpriv->num_rings)
			return -EINVAL;

		if (!vfpriv->base_fence_ctx)
			return -EINVAL;

		fence_ctx = vfpriv->base_fence_ctx;
		ring_idx = exbuf->ring_idx;
	}

	exbuf->fence_fd = -1;

	virtio_gpu_create_context(dev, file);
	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, fence_ctx + ring_idx))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unused_fd;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_memdup;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_unresv;
	}

	ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
	if (ret)
		goto out_unresv;

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_unresv;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	dma_fence_put(&out_fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_memdup:
	kvfree(buf);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

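/* Report a single driver/host capability value back to userspace. */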
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	case VIRTGPU_PARAM_RESOURCE_BLOB:
		value = vgdev->has_resource_blob ? 1 : 0;
		break;
	case VIRTGPU_PARAM_HOST_VISIBLE:
		value = vgdev->has_host_visible ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CROSS_DEVICE:
		value = vgdev->has_resource_assign_uuid ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CONTEXT_INIT:
		value = vgdev->has_context_init ? 1 : 0;
		break;
	case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
		value = vgdev->capset_id_mask;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

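/*
 * Create a non-blob resource: a host-side 3D resource when virgl is
 * available, otherwise a plain 2D resource.
 */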
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d) {
		virtio_gpu_create_context(dev, file);
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	} else {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	/* allocate a single page size object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}

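/* Query the size, hardware resource handle and blob type of a BO. */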
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	if (qobj->host3d_blob || qobj->guest_blob)
		ri->blob_mem = qobj->blob_mem;

	drm_gem_object_put(gobj);
	return 0;
}

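/* Queue a fenced transfer of resource contents from the host to the guest. */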
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
		ret = -EINVAL;
		goto err_put_free;
	}

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
		 args->layer_stride, &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

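/*
 * Queue a transfer of resource contents from the guest to the host:
 * an unfenced 2D transfer when virgl is absent, a fenced 3D transfer
 * otherwise.
 */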
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);

		if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
			ret = -EINVAL;
			goto err_put_free;
		}

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
					       0);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
			 args->stride, args->layer_stride, &args->box, objs,
			 fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

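/*
 * Wait for all fences attached to a BO to signal, or just poll them
 * when VIRTGPU_WAIT_NOWAIT is set; returns -EBUSY if the BO is still
 * busy (or the wait timed out).
 */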
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
	} else {
		ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ,
					    true, timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);
	return ret;
}

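/*
 * Copy a capability set to userspace, fetching it from the host and
 * caching it on first use.
 */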
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned int size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	ret = virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
					&cache_ent);
	if (ret)
		return ret;
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* The is_valid check must precede the copy of the cache entry. */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}

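/*
 * Validate the userspace arguments for blob resource creation and fill
 * in the object parameters, classifying the blob as guest and/or
 * host3d backed.
 */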
static int verify_blob(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_fpriv *vfpriv,
		       struct virtio_gpu_object_params *params,
		       struct drm_virtgpu_resource_create_blob *rc_blob,
		       bool *guest_blob, bool *host3d_blob)
{
	if (!vgdev->has_resource_blob)
		return -EINVAL;

	if (rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK)
		return -EINVAL;

	if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		if (!vgdev->has_resource_assign_uuid)
			return -EINVAL;
	}

	switch (rc_blob->blob_mem) {
	case VIRTGPU_BLOB_MEM_GUEST:
		*guest_blob = true;
		break;
	case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
		*guest_blob = true;
		fallthrough;
	case VIRTGPU_BLOB_MEM_HOST3D:
		*host3d_blob = true;
		break;
	default:
		return -EINVAL;
	}

	if (*host3d_blob) {
		if (!vgdev->has_virgl_3d)
			return -EINVAL;

		/* Must be dword aligned. */
		if (rc_blob->cmd_size % 4 != 0)
			return -EINVAL;

		params->ctx_id = vfpriv->ctx_id;
		params->blob_id = rc_blob->blob_id;
	} else {
		if (rc_blob->blob_id != 0)
			return -EINVAL;

		if (rc_blob->cmd_size != 0)
			return -EINVAL;
	}

	params->blob_mem = rc_blob->blob_mem;
	params->size = rc_blob->size;
	params->blob = true;
	params->blob_flags = rc_blob->blob_flags;
	return 0;
}

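/*
 * Create a blob resource: optionally submit an initialisation command
 * stream, back the blob with guest pages or host VRAM, and hand back a
 * GEM handle for the result.
 */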
static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
						 void *data,
						 struct drm_file *file)
{
	int ret = 0;
	uint32_t handle = 0;
	bool guest_blob = false;
	bool host3d_blob = false;
	struct drm_gem_object *obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_resource_create_blob *rc_blob = data;

	if (verify_blob(vgdev, vfpriv, &params, rc_blob,
			&guest_blob, &host3d_blob))
		return -EINVAL;

	if (vgdev->has_virgl_3d)
		virtio_gpu_create_context(dev, file);

	if (rc_blob->cmd_size) {
		void *buf;

		buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
				  rc_blob->cmd_size);

		if (IS_ERR(buf))
			return PTR_ERR(buf);

		virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
				      vfpriv->ctx_id, NULL, NULL);
	}

	if (guest_blob)
		ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
	else if (!guest_blob && host3d_blob)
		ret = virtio_gpu_vram_create(vgdev, &params, &bo);
	else
		return -EINVAL;

	if (ret < 0)
		return ret;

	bo->guest_blob = guest_blob;
	bo->host3d_blob = host3d_blob;
	bo->blob_mem = rc_blob->blob_mem;
	bo->blob_flags = rc_blob->blob_flags;

	obj = &bo->base.base;
	if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
		if (ret) {
			drm_gem_object_release(obj);
			return ret;
		}
	}

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc_blob->res_handle = bo->hw_res_handle;
	rc_blob->bo_handle = handle;

	return 0;
}

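/*
 * Initialise a host rendering context with user-supplied parameters
 * (capset ID, ring count, poll mask); may only be done once per file.
 */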
static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
					 void *data, struct drm_file *file)
{
	int ret = 0;
	uint32_t num_params, i, param, value;
	uint64_t valid_ring_mask;
	size_t len;
	struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_context_init *args = data;

	num_params = args->num_params;
	len = num_params * sizeof(struct drm_virtgpu_context_set_param);

	if (!vgdev->has_context_init || !vgdev->has_virgl_3d)
		return -EINVAL;

	/* Number of unique parameters supported at this time. */
	if (num_params > 3)
		return -EINVAL;

	ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
				     len);

	if (IS_ERR(ctx_set_params))
		return PTR_ERR(ctx_set_params);

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created) {
		ret = -EEXIST;
		goto out_unlock;
	}

	for (i = 0; i < num_params; i++) {
		param = ctx_set_params[i].param;
		value = ctx_set_params[i].value;

		switch (param) {
		case VIRTGPU_CONTEXT_PARAM_CAPSET_ID:
			if (value > MAX_CAPSET_ID) {
				ret = -EINVAL;
				goto out_unlock;
			}

			if ((vgdev->capset_id_mask & (1ULL << value)) == 0) {
				ret = -EINVAL;
				goto out_unlock;
			}

			/* Context capset ID already set */
			if (vfpriv->context_init &
			    VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->context_init |= value;
			break;
		case VIRTGPU_CONTEXT_PARAM_NUM_RINGS:
			if (vfpriv->base_fence_ctx) {
				ret = -EINVAL;
				goto out_unlock;
			}

			if (value > MAX_RINGS) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->base_fence_ctx = dma_fence_context_alloc(value);
			vfpriv->num_rings = value;
			break;
		case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK:
			if (vfpriv->ring_idx_mask) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->ring_idx_mask = value;
			break;
		default:
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	if (vfpriv->ring_idx_mask) {
		valid_ring_mask = 0;
		for (i = 0; i < vfpriv->num_rings; i++)
			valid_ring_mask |= 1ULL << i;

		if (~valid_ring_mask & vfpriv->ring_idx_mask) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	virtio_gpu_create_context_locked(vgdev, vfpriv);
	virtio_gpu_notify(vgdev);

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
	kfree(ctx_set_params);
	return ret;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
			  virtio_gpu_resource_create_blob_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
			  DRM_RENDER_ALLOW),
};