// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

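/*
 * Order of the per-submission lookup hash table: 1 << VMW_RES_HT_ORDER
 * (4096) buckets. Both resources and buffer objects are entered into
 * sw_context->res_ht keyed by their kernel pointer value, so repeated
 * references in a command stream are resolved with a single hash lookup
 * (see vmw_resource_val_add() and vmw_bo_to_validate_list() below).
 */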
#define VMW_RES_HT_ORDER 12

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
 * after validation is -1, the command is replaced with a NOP. Otherwise no
 * action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking.
 */
enum vmw_resource_relocation_type {
        vmw_res_rel_normal,
        vmw_res_rel_nop,
        vmw_res_rel_cond_nop,
        vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Byte offset into the command buffer where the id that needs
 * fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        u32 offset:29;
        enum vmw_resource_relocation_type rel_type:3;
};
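
/*
 * The two bitfields above pack into a single u32: 29 bits are enough to
 * address any byte in a command buffer smaller than SVGA_CB_MAX_SIZE, and
 * 3 bits are enough to hold all relocation types. Both limits are asserted
 * at compile time with BUILD_BUG_ON() in vmw_resource_relocations_apply().
 */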

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
        struct list_head head;
        struct drm_hash_item hash;
        struct vmw_resource *res;
        struct vmw_buffer_object *new_backup;
        struct vmw_ctx_binding_state *staged_bindings;
        unsigned long new_backup_offset;
        u32 first_usage : 1;
        u32 switching_backup : 1;
        u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to verify the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
        int (*func) (struct vmw_private *, struct vmw_sw_context *,
                     SVGA3dCmdHeader *);
        bool user_allow;
        bool gb_disable;
        bool gb_enable;
        const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
        [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
                                       (_gb_disable), (_gb_enable), #_cmd}
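
/*
 * For illustration, an entry such as
 *
 *   VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *               true, false, false)
 *
 * expands to the designated initializer
 *
 *   [SVGA_3D_CMD_SURFACE_COPY - SVGA_3D_CMD_BASE] =
 *           {&vmw_cmd_surface_copy_check, true, false, false,
 *            "SVGA_3D_CMD_SURFACE_COPY"}
 *
 * so a command is dispatched by indexing the verifier table with its
 * command id offset from SVGA_3D_CMD_BASE. (Example values only; the
 * actual table entries are defined elsewhere in this file.)
 */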

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_buffer_object **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_buffer_object *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
        return (unsigned long) b - (unsigned long) a;
}
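
/*
 * Typical use in this file: compute the byte offset of a resource id
 * inside the command buffer being parsed, relative to the start of that
 * buffer, e.g.
 *
 *   offset = vmw_ptr_diff(sw_context->buf_start, id_loc);
 *
 * The offset is then stored in a struct vmw_resource_relocation for later
 * patching (see vmw_cmd_res_reloc_add() below).
 */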

/**
 * vmw_resources_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: pointer to the software context
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
                                    bool backoff)
{
        struct vmw_resource_val_node *val;
        struct list_head *list = &sw_context->resource_list;

        if (sw_context->dx_query_mob && !backoff)
                vmw_context_bind_dx_query(sw_context->dx_query_ctx,
                                          sw_context->dx_query_mob);

        list_for_each_entry(val, list, head) {
                struct vmw_resource *res = val->res;
                bool switch_backup =
                        (backoff) ? false : val->switching_backup;

                /*
                 * Transfer staged context bindings to the
                 * persistent context binding tracker.
                 */
                if (unlikely(val->staged_bindings)) {
                        if (!backoff) {
                                vmw_binding_state_commit
                                        (vmw_context_binding_state(val->res),
                                         val->staged_bindings);
                        }

                        if (val->staged_bindings != sw_context->staged_bindings)
                                vmw_binding_state_free(val->staged_bindings);
                        else
                                sw_context->staged_bindings_inuse = false;
                        val->staged_bindings = NULL;
                }
                vmw_resource_unreserve(res, switch_backup, val->new_backup,
                                       val->new_backup_offset);
                vmw_bo_unreference(&val->new_backup);
        }
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   struct vmw_resource_val_node *node)
{
        int ret;

        ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
        if (unlikely(ret != 0))
                goto out_err;

        if (!sw_context->staged_bindings) {
                sw_context->staged_bindings =
                        vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(sw_context->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(sw_context->staged_bindings);
                        sw_context->staged_bindings = NULL;
                        goto out_err;
                }
        }

        if (sw_context->staged_bindings_inuse) {
                node->staged_bindings = vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(node->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(node->staged_bindings);
                        node->staged_bindings = NULL;
                        goto out_err;
                }
        } else {
                node->staged_bindings = sw_context->staged_bindings;
                sw_context->staged_bindings_inuse = true;
        }

        return 0;
out_err:
        return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *res,
                                struct vmw_resource_val_node **p_node)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_resource_val_node *node;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
                                    &hash) == 0)) {
                node = container_of(hash, struct vmw_resource_val_node, hash);
                node->first_usage = false;
                if (unlikely(p_node != NULL))
                        *p_node = node;
                return 0;
        }

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (unlikely(!node)) {
                DRM_ERROR("Failed to allocate a resource validation "
                          "entry.\n");
                return -ENOMEM;
        }

        node->hash.key = (unsigned long) res;
        ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to initialize a resource validation "
                          "entry.\n");
                kfree(node);
                return ret;
        }
        node->res = vmw_resource_reference(res);
        node->first_usage = true;
        if (unlikely(p_node != NULL))
                *p_node = node;

        if (!dev_priv->has_mob) {
                list_add_tail(&node->head, &sw_context->resource_list);
                return 0;
        }

        switch (vmw_res_type(res)) {
        case vmw_res_context:
        case vmw_res_dx_context:
                list_add(&node->head, &sw_context->ctx_resource_list);
                ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
                break;
        case vmw_res_cotable:
                list_add_tail(&node->head, &sw_context->ctx_resource_list);
                break;
        default:
                list_add_tail(&node->head, &sw_context->resource_list);
                break;
        }

        return ret;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *view)
{
        int ret;

        /*
         * First add the resource the view is pointing to, otherwise
         * it may be swapped out when the view is validated.
         */
        ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
        if (ret)
                return ret;

        return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and a negative error
 * code on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
                               enum vmw_view_type view_type, u32 id)
{
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *view;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        view = vmw_view_lookup(sw_context->man, view_type, id);
        if (IS_ERR(view))
                return PTR_ERR(view);

        ret = vmw_view_res_val_add(sw_context, view);
        vmw_resource_unreference(&view);

        return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx)
{
        struct list_head *binding_list;
        struct vmw_ctx_bindinfo *entry;
        int ret = 0;
        struct vmw_resource *res;
        u32 i;

        /* Add all cotables to the validation list. */
        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                        res = vmw_context_cotable(ctx, i);
                        if (IS_ERR(res))
                                continue;

                        ret = vmw_resource_val_add(sw_context, res, NULL);
                        vmw_resource_unreference(&res);
                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        /* Add all resources bound to the context to the validation list */
        mutex_lock(&dev_priv->binding_mutex);
        binding_list = vmw_context_binding_list(ctx);

        list_for_each_entry(entry, binding_list, ctx_list) {
                /* entry->res is not refcounted */
                res = vmw_resource_reference_unless_doomed(entry->res);
                if (unlikely(res == NULL))
                        continue;

                if (vmw_res_type(entry->res) == vmw_res_view)
                        ret = vmw_view_res_val_add(sw_context, entry->res);
                else
                        ret = vmw_resource_val_add(sw_context, entry->res,
                                                   NULL);
                vmw_resource_unreference(&res);
                if (unlikely(ret != 0))
                        break;
        }

        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                struct vmw_buffer_object *dx_query_mob;

                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
                if (dx_query_mob)
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      dx_query_mob,
                                                      true, NULL);
        }

        mutex_unlock(&dev_priv->binding_mutex);
        return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct list_head *list,
                                       const struct vmw_resource *res,
                                       unsigned long offset,
                                       enum vmw_resource_relocation_type
                                       rel_type)
{
        struct vmw_resource_relocation *rel;

        rel = kmalloc(sizeof(*rel), GFP_KERNEL);
        if (unlikely(!rel)) {
                DRM_ERROR("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        rel->rel_type = rel_type;
        list_add_tail(&rel->head, list);

        return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        struct vmw_resource_relocation *rel, *n;

        list_for_each_entry_safe(rel, n, list, head) {
                list_del(&rel->head);
                kfree(rel);
        }
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

        /* Validate the struct vmw_resource_relocation member size */
        BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
        BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

        list_for_each_entry(rel, list, head) {
                u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
                switch (rel->rel_type) {
                case vmw_res_rel_normal:
                        *addr = rel->res->id;
                        break;
                case vmw_res_rel_nop:
                        *addr = SVGA_3D_CMD_NOP;
                        break;
                default:
                        if (rel->res->id == -1)
                                *addr = SVGA_3D_CMD_NOP;
                        break;
                }
        }
}
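
/*
 * Note on the NOP cases above: for vmw_res_rel_nop and vmw_res_rel_cond_nop
 * relocations, callers typically record the offset of the command's header
 * id rather than of a resource id, so writing SVGA_3D_CMD_NOP there turns
 * the whole command into a no-op that the device skips. A conditional NOP
 * only does so when the referenced resource never received a hardware id
 * (id == -1).
 */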

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_buffer_object *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node)
{
        uint32_t val_node;
        struct vmw_validate_buffer *vval_buf;
        struct ttm_validate_buffer *val_buf;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
                                    &hash) == 0)) {
                vval_buf = container_of(hash, struct vmw_validate_buffer,
                                        hash);
                if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
                        DRM_ERROR("Inconsistent buffer usage.\n");
                        return -EINVAL;
                }
                val_buf = &vval_buf->base;
                val_node = vval_buf - sw_context->val_bufs;
        } else {
                val_node = sw_context->cur_val_buf;
                if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
                        DRM_ERROR("Max number of DMA buffers per submission "
                                  "exceeded.\n");
                        return -EINVAL;
                }
                vval_buf = &sw_context->val_bufs[val_node];
                vval_buf->hash.key = (unsigned long) vbo;
                ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to initialize a buffer validation "
                                  "entry.\n");
                        return ret;
                }
                ++sw_context->cur_val_buf;
                val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(&vbo->base);
                val_buf->shared = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                vval_buf->validate_as_mob = validate_as_mob;
        }

        if (p_val_node)
                *p_val_node = val_node;

        return 0;
}
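
/*
 * The node number returned through @p_val_node above is an index into
 * sw_context->val_bufs. Buffer relocations store it (see reloc->index in
 * vmw_translate_mob_ptr() and vmw_translate_guest_ptr() below) so that the
 * buffer's final placement can be looked up when the relocations are
 * applied after validation.
 */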

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret = 0;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
                if (unlikely(ret != 0))
                        return ret;

                if (res->backup) {
                        struct vmw_buffer_object *vbo = res->backup;

                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
                                 vmw_resource_needs_backup(res), NULL);

                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        if (sw_context->dx_query_mob) {
                struct vmw_buffer_object *expected_dx_query_mob;

                expected_dx_query_mob =
                        vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
                if (expected_dx_query_mob &&
                    expected_dx_query_mob != sw_context->dx_query_mob) {
                        ret = -EINVAL;
                }
        }

        return ret;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;
                struct vmw_buffer_object *backup = res->backup;

                ret = vmw_resource_validate(res);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to validate resource.\n");
                        return ret;
                }

                /* Check if the resource switched backup buffer */
                if (backup && res->backup && (backup != res->backup)) {
                        struct vmw_buffer_object *vbo = res->backup;

                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
                                 vmw_resource_needs_backup(res), NULL);
                        if (ret) {
                                ttm_bo_unreserve(&vbo->base);
                                return ret;
                        }
                }
        }
        return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 uint32_t *id_loc,
                                 struct vmw_resource *res,
                                 struct vmw_resource_val_node **p_val)
{
        int ret;
        struct vmw_resource_val_node *node;

        *p_val = NULL;
        ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                          res,
                                          vmw_ptr_diff(sw_context->buf_start,
                                                       id_loc),
                                          vmw_res_rel_normal);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_resource_val_add(sw_context, res, &node);
        if (unlikely(ret != 0))
                return ret;

        if (p_val)
                *p_val = node;

        return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
                  struct vmw_sw_context *sw_context,
                  enum vmw_res_type res_type,
                  const struct vmw_user_resource_conv *converter,
                  uint32_t *id_loc,
                  struct vmw_resource_val_node **p_val)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        struct vmw_resource_val_node *node;
        int ret;

        if (*id_loc == SVGA3D_INVALID_ID) {
                if (p_val)
                        *p_val = NULL;
                if (res_type == vmw_res_context) {
                        DRM_ERROR("Illegal context invalid id.\n");
                        return -EINVAL;
                }
                return 0;
        }

        /*
         * Fastpath in case of repeated commands referencing the same
         * resource
         */

        if (likely(rcache->valid && *id_loc == rcache->handle)) {
                const struct vmw_resource *res = rcache->res;

                rcache->node->first_usage = false;
                if (p_val)
                        *p_val = rcache->node;

                return vmw_resource_relocation_add
                        (&sw_context->res_relocations, res,
                         vmw_ptr_diff(sw_context->buf_start, id_loc),
                         vmw_res_rel_normal);
        }

        ret = vmw_user_resource_lookup_handle(dev_priv,
                                              sw_context->fp->tfile,
                                              *id_loc,
                                              converter,
                                              &res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use resource 0x%08x.\n",
                          (unsigned) *id_loc);
                dump_stack();
                return ret;
        }

        rcache->valid = true;
        rcache->res = res;
        rcache->handle = *id_loc;

        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
                                    res, &node);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        rcache->node = node;
        if (p_val)
                *p_val = node;
        vmw_resource_unreference(&res);
        return 0;

out_no_reloc:
        BUG_ON(sw_context->error_resource != NULL);
        sw_context->error_resource = res;

        return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
        struct vmw_private *dev_priv = ctx_res->dev_priv;
        struct vmw_buffer_object *dx_query_mob;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindAllQuery body;
        } *cmd;

        dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

        if (!dx_query_mob || dx_query_mob->dx_query_ctx)
                return 0;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

        if (cmd == NULL) {
                DRM_ERROR("Failed to rebind queries.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = ctx_res->id;
        cmd->body.mobid = dx_query_mob->base.mem.start;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        vmw_context_bind_dx_query(ctx_res, dx_query_mob);

        return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                if (unlikely(!val->staged_bindings))
                        break;

                ret = vmw_binding_rebind_all
                        (vmw_context_binding_state(val->res));
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to rebind context.\n");
                        return ret;
                }

                ret = vmw_rebind_all_dx_query(val->res);
                if (ret != 0)
                        return ret;
        }

        return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
                                 enum vmw_view_type view_type,
                                 enum vmw_ctx_binding_type binding_type,
                                 uint32 shader_slot,
                                 uint32 view_ids[], u32 num_views,
                                 u32 first_slot)
{
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_cmdbuf_res_manager *man;
        u32 i;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        man = sw_context->man;
        for (i = 0; i < num_views; ++i) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_resource *view = NULL;

                if (view_ids[i] != SVGA3D_INVALID_ID) {
                        view = vmw_view_lookup(man, view_type, view_ids[i]);
                        if (IS_ERR(view)) {
                                DRM_ERROR("View not found.\n");
                                return PTR_ERR(view);
                        }

                        ret = vmw_view_res_val_add(sw_context, view);
                        if (ret) {
                                DRM_ERROR("Could not add view to "
                                          "validation list.\n");
                                vmw_resource_unreference(&view);
                                return ret;
                        }
                }
                binding.bi.ctx = ctx_node->res;
                binding.bi.res = view;
                binding.bi.bt = binding_type;
                binding.shader_slot = shader_slot;
                binding.slot = first_slot + i;
                vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
                                shader_slot, binding.slot);
                if (view)
                        vmw_resource_unreference(&view);
        }

        return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                uint32_t cid;
        } *cmd;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        struct vmw_resource_val_node *ctx_node;
        struct vmw_resource_val_node *res_node;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        if (cmd->body.type >= SVGA3D_RT_MAX) {
                DRM_ERROR("Illegal render target type %u.\n",
                          (unsigned) cmd->body.type);
                return -EINVAL;
        }

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
                                &ctx_node);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.target.sid, &res_node);
        if (unlikely(ret != 0))
                return ret;

        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo_view binding;

                binding.bi.ctx = ctx_node->res;
                binding.bi.res = res_node ? res_node->res : NULL;
                binding.bi.bt = vmw_ctx_binding_rt;
                binding.slot = cmd->body.type;
                vmw_binding_add(ctx_node->staged_bindings,
                                &binding.bi, 0, binding.slot);
        }

        return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (ret)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBufferCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXPredCopyRegion body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.srcSid, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter, &cmd->body.sid,
                                 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct vmw_buffer_object *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

                if (unlikely(new_query_bo->base.num_pages > 4)) {
                        DRM_ERROR("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
                                                      dev_priv->has_mob, NULL);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
                                              dev_priv->has_mob, NULL);
                if (unlikely(ret != 0))
                        return ret;

        }

        return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context)
{
        /*
         * The validate list should still hold references to all
         * contexts here.
         */

        if (sw_context->needs_post_query_barrier) {
                struct vmw_res_cache_entry *ctx_entry =
                        &sw_context->res_cache[vmw_res_context];
                struct vmw_resource *ctx;
                int ret;

                BUG_ON(!ctx_entry->valid);
                ctx = ctx_entry->res;

                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        DRM_ERROR("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
                        vmw_bo_unreference(&dev_priv->pinned_bo);
                }

                if (!sw_context->needs_post_query_barrier) {
                        vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

                        /*
                         * We pin also the dummy_query_bo buffer so that we
                         * don't need to validate it when emitting
                         * dummy queries in context destroy paths.
                         */

                        if (!dev_priv->dummy_query_bo_pinned) {
                                vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
                                                    true);
                                dev_priv->dummy_query_bo_pinned = true;
                        }

                        BUG_ON(sw_context->last_query_ctx == NULL);
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
                                vmw_bo_reference(sw_context->cur_query_bo);
                }
        }
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_buffer_object **vmw_bo_p)
{
        struct vmw_buffer_object *vmw_bo = NULL;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->mob_loc = id;
        reloc->location = NULL;

        ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_bo_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_buffer_object **vmw_bo_p)
{
        struct vmw_buffer_object *vmw_bo = NULL;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;

        ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_bo_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

/**
 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct vmw_dx_define_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDefineQuery q;
        } *cmd;
        int ret;
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *cotable_res;

        if (ctx_node == NULL) {
                DRM_ERROR("DX Context not set for query.\n");
                return -EINVAL;
        }

        cmd = container_of(header, struct vmw_dx_define_query_cmd, header);

        if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
            cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
                return -EINVAL;

        cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
        ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
        vmw_resource_unreference(&cotable_res);

        return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB. In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_dx_bind_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindQuery q;
        } *cmd;
        struct vmw_buffer_object *vmw_bo;
        int ret;

        cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);

        /*
         * Look up the buffer pointed to by q.mobid, put it on the relocation
         * list so its kernel mode MOB ID can be filled in later
         */
        ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
                                    &vmw_bo);

        if (ret != 0)
                return ret;

        sw_context->dx_query_mob = vmw_bo;
        sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;

        vmw_bo_unreference(&vmw_bo);

        return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
                                  SVGA3dCmdHeader *header)
{
        struct vmw_begin_gb_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBeginGBQuery q;
        } *cmd;

        cmd = container_of(header, struct vmw_begin_gb_query_cmd,
                           header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->q.cid,
                                 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
                               struct vmw_sw_context *sw_context,
                               SVGA3dCmdHeader *header)
{
        struct vmw_begin_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBeginQuery q;
        } *cmd;

        cmd = container_of(header, struct vmw_begin_query_cmd,
                           header);

        if (unlikely(dev_priv->has_mob)) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdBeginGBQuery q;
                } gb_cmd;

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
        }

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->q.cid,
                                 NULL);
}
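
/*
 * The rewrite above is a pattern shared with vmw_cmd_end_query() and
 * vmw_cmd_wait_query() below: when the device uses guest-backed objects,
 * a legacy query command is converted in place to its guest-backed
 * equivalent and then re-verified through the guest-backed handler. The
 * BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)) guards the in-place memcpy;
 * both command layouts must occupy the same number of bytes for the
 * conversion to be safe.
 */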

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                SVGA3dCmdHeader *header)
{
        struct vmw_buffer_object *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndGBQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context,
                                    &cmd->q.mobid,
                                    &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

        vmw_bo_unreference(&vmw_bo);
        return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_buffer_object *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        if (dev_priv->has_mob) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdEndGBQuery q;
                } gb_cmd;

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;
                gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
                gb_cmd.q.offset = cmd->q.guestResult.offset;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
        }

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

        vmw_bo_unreference(&vmw_bo);
        return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_buffer_object *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForGBQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context,
                                    &cmd->q.mobid,
                                    &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_bo_unreference(&vmw_bo);
        return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
{
        struct vmw_buffer_object *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        if (dev_priv->has_mob) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdWaitForGBQuery q;
                } gb_cmd;

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;
                gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
                gb_cmd.q.offset = cmd->q.guestResult.offset;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
        }

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_bo_unreference(&vmw_bo);
        return 0;
}
1712
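/**
 * vmw_cmd_dma - Validate an SVGA_3D_CMD_SURFACE_DMA command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */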
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stays in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		/* Drop the reference taken by vmw_translate_guest_ptr(). */
		ret = -EINVAL;
		goto out_no_surface;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_bo_unreference(&vmw_bo);
	return ret;
}

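/**
 * vmw_cmd_draw - Validate an SVGA_3D_CMD_DRAW_PRIMITIVES command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */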
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

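/**
 * vmw_cmd_tex_state - Validate an SVGA_3D_CMD_SETTEXTURESTATE command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */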
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(*header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;

			binding.bi.ctx = ctx_node->res;
			binding.bi.res = res_node ? res_node->res : NULL;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
					0, binding.texture_stage);
		}
	}

	return 0;
}

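/**
 * vmw_cmd_check_define_gmrfb - Validate an SVGA_CMD_DEFINE_GMRFB command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 */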
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @val_node: The validation node representing the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource_val_node *val_node,
				     uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *dma_buf;
	int ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (ret)
		return ret;

	val_node->switching_backup = true;
	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_bo_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource_val_node *val_node;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
					 buf_id, backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_shader_define_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineShader body;
	} *cmd;
	int ret;
	size_t size;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_define_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv,
				    vmw_context_res_man(val->res),
				    cmd->body.shid, cmd + 1,
				    cmd->body.type, size,
				    &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_shader_destroy_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyShader body;
	} *cmd;
	int ret;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_destroy_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(val->res),
				cmd->body.shid,
				cmd->body.type,
				&sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *res = NULL;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
					cmd->body.shid,
					cmd->body.type);

		if (!IS_ERR(res)) {
			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
						    &cmd->body.shid, res,
						    &res_node);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (!res_node) {
		ret = vmw_cmd_res_check(dev_priv, sw_context,
					vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSingleConstantBuffer body;
	} *cmd;
	struct vmw_resource_val_node *res_node = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
			  (unsigned) cmd->body.type,
			  (unsigned) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate an
 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShaderResources body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

/**
 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShader body;
	} *cmd;
	struct vmw_resource *res = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			DRM_ERROR("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_resource_val_add(sw_context, res, NULL);
		if (ret)
			goto out_unref;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
out_unref:
	if (res)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validates an
 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		DRM_ERROR("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->buf[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_dx_set_index_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetIndexBuffer body;
	} *cmd;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = ((res_node) ? res_node->res : NULL);
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate an
 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetRenderTargets body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		DRM_ERROR("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
				    vmw_ctx_binding_ds, 0,
				    &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0,
				     (void *)&cmd[1], num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearRenderTargetView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_rt,
				   cmd->body.renderTargetViewId);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearDepthStencilView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_ds,
				   cmd->body.depthStencilViewId);
}

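/**
 * vmw_cmd_dx_view_define - Validate an SVGA_3D_CMD_DX_DEFINE_[X]_VIEW
 * command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */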
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource_val_node *srf_node;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->sid, &srf_node);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man,
			    ctx_node->res,
			    srf_node->res,
			    view_type,
			    cmd->defined_id,
			    header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_set_so_targets - Validate an
 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_so binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		DRM_ERROR("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->targets[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.bi.bt = vmw_ctx_binding_so;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

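/**
 * vmw_cmd_dx_so_define - Validate an SVGA_3D_CMD_DX_DEFINE_[X] state-object
 * define command, notifying the relevant cotable of the new id.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */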
static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate an
 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->sid, NULL);
}

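/**
 * vmw_cmd_dx_cid_check - Validate a command that requires only a bound
 * DX context.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */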
static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - Validate a view remove command and
 * schedule the view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this
 * command batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_view_remove(sw_context->man,
			      cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res,
			      &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}

/**
 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	vmw_resource_unreference(&res);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);
	if (ret)
		DRM_ERROR("Could not find shader to remove.\n");

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					user_context_converter,
					&cmd->body.cid, &ctx_node);
		if (ret)
			return ret;
	} else {
		ctx_node = sw_context->dx_ctx_node;
		if (!ctx_node) {
			DRM_ERROR("DX Context not set.\n");
			return -EINVAL;
		}
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
				cmd->body.shid, 0);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_resource_val_add(sw_context, res, &res_node);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		goto out_unref;
	}

	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
					&cmd->body.mobid,
					cmd->body.offsetInBytes);
out_unref:
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXGenMips body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_sr,
				   cmd->body.shaderResourceViewId);
}

/**
 * vmw_cmd_dx_transfer_from_buffer - Validate an
 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXTransferFromBuffer body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.destSid, NULL);
}

/**
 * vmw_cmd_intra_surface_copy - Validate an
 * SVGA_3D_CMD_INTRA_SURFACE_COPY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdIntraSurfaceCopy body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.surface.sid, NULL);
}

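/**
 * vmw_cmd_check_not_3d - Validate a non-3D SVGA FIFO command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 * @size: On input, the number of bytes remaining in the command stream.
 * On successful return, the size of the parsed command.
 */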
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch): %u.\n",
			  cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

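/*
 * Dispatch table mapping each SVGA 3D command id to its validator function.
 * The three boolean flags of each entry indicate, in order, whether
 * user-space is allowed to submit the command, whether the command is valid
 * when guest-backed objects are unavailable, and whether it is valid when
 * they are available.
 */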
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/*
	 * DX commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3394 &vmw_cmd_dx_cid_check, true, false, true),
3395 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3396 true, false, true),
3397 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3398 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3399 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3400 &vmw_cmd_dx_set_index_buffer, true, false, true),
3401 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3402 &vmw_cmd_dx_set_rendertargets, true, false, true),
3403 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3404 true, false, true),
3405 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3406 &vmw_cmd_dx_cid_check, true, false, true),
3407 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3408 &vmw_cmd_dx_cid_check, true, false, true),
3409 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3410 true, false, true),
3411 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3412 true, false, true),
3413 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3414 true, false, true),
3415 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3416 &vmw_cmd_dx_cid_check, true, false, true),
3417 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3418 true, false, true),
3419 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3420 true, false, true),
3421 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3422 true, false, true),
3423 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3424 true, false, true),
3425 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3426 true, false, true),
3427 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3428 true, false, true),
3429 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3430 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3431 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3432 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3433 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3434 true, false, true),
3435 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3436 true, false, true),
3437 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3438 &vmw_cmd_dx_check_subresource, true, false, true),
3439 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3440 &vmw_cmd_dx_check_subresource, true, false, true),
3441 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3442 &vmw_cmd_dx_check_subresource, true, false, true),
3443 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3444 &vmw_cmd_dx_view_define, true, false, true),
3445 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3446 &vmw_cmd_dx_view_remove, true, false, true),
3447 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3448 &vmw_cmd_dx_view_define, true, false, true),
3449 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3450 &vmw_cmd_dx_view_remove, true, false, true),
3451 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3452 &vmw_cmd_dx_view_define, true, false, true),
3453 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3454 &vmw_cmd_dx_view_remove, true, false, true),
3455 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3456 &vmw_cmd_dx_so_define, true, false, true),
3457 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3458 &vmw_cmd_dx_cid_check, true, false, true),
3459 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3460 &vmw_cmd_dx_so_define, true, false, true),
3461 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3462 &vmw_cmd_dx_cid_check, true, false, true),
3463 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3464 &vmw_cmd_dx_so_define, true, false, true),
3465 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3466 &vmw_cmd_dx_cid_check, true, false, true),
3467 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3468 &vmw_cmd_dx_so_define, true, false, true),
3469 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3470 &vmw_cmd_dx_cid_check, true, false, true),
3471 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3472 &vmw_cmd_dx_so_define, true, false, true),
3473 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3474 &vmw_cmd_dx_cid_check, true, false, true),
3475 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3476 &vmw_cmd_dx_define_shader, true, false, true),
3477 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3478 &vmw_cmd_dx_destroy_shader, true, false, true),
3479 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3480 &vmw_cmd_dx_bind_shader, true, false, true),
3481 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3482 &vmw_cmd_dx_so_define, true, false, true),
3483 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3484 &vmw_cmd_dx_cid_check, true, false, true),
3485 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3486 true, false, true),
3487 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3488 &vmw_cmd_dx_set_so_targets, true, false, true),
3489 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3490 &vmw_cmd_dx_cid_check, true, false, true),
3491 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3492 &vmw_cmd_dx_cid_check, true, false, true),
3493 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3494 &vmw_cmd_buffer_copy_check, true, false, true),
3495 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3496 &vmw_cmd_pred_copy_check, true, false, true),
3497 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3498 &vmw_cmd_dx_transfer_from_buffer,
3499 true, false, true),
3500 VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3501 true, false, true),
3502 };
3503
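
/**
 * vmw_cmd_describe - Return the name and size of a device command
 *
 * @buf: Pointer to the command.
 * @size: Outputs the size of the command, including the header.
 * @cmd: Outputs a pointer to the command's name.
 *
 * Returns true if the command was recognized, false otherwise.
 */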
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}
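
/**
 * vmw_cmd_check - Validate a single command in the command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command.
 * @size: On input, the number of bytes remaining in the command stream.
 * On output, the size of the checked command, including the header.
 *
 * Looks the command up in @vmw_cmd_entries, checks that the caller is
 * allowed to issue it and hands it to the per-command check function.
 * Returns 0 on success, negative error code on failure.
 */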
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;
out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
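
/**
 * vmw_cmd_check_all - Validate a complete command batch
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the start of the command batch.
 * @size: Size of the command batch in bytes.
 *
 * Iterates over the batch, handing each command to vmw_cmd_check().
 * Returns 0 on success, negative error code on failure.
 */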
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}
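
/**
 * vmw_apply_relocations - Patch buffer object locations into the
 * command stream
 *
 * @sw_context: The software context holding the relocation list.
 *
 * For each relocation, writes the GMR id, MOB id or VRAM offset of the
 * now-validated buffer object into the bounce buffer copy of the
 * command stream.
 */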
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @sw_context: The software context used for this command submission.
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
					  struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);

		if (val->staged_bindings) {
			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}

		kfree(val);
	}
}
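
/**
 * vmw_clear_validations - Drop buffer object references and hash
 * entries set up during command validation
 *
 * @sw_context: The software context used for this command submission.
 */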
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
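
/**
 * vmw_validate_single_buffer - Validate (place) a single buffer object
 *
 * @dev_priv: Pointer to a device private struct.
 * @bo: The buffer object to validate.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @validate_as_mob: Whether the buffer must be placed as a MOB.
 *
 * Returns 0 on success, negative error code on failure.
 */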
int vmw_validate_single_buffer(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool validate_as_mob)
{
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	struct ttm_operation_ctx ctx = { interruptible, false };
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 true,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
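
/**
 * vmw_resize_cmd_bounce - Ensure the command bounce buffer is large
 * enough
 *
 * @sw_context: The software context owning the bounce buffer.
 * @size: The required size in bytes.
 *
 * Grows the bounce buffer by about 50% per iteration, page aligned,
 * until it can hold @size bytes. Returns 0 on success, -ENOMEM if
 * allocation fails.
 */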
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * @file_priv: Pointer to the drm file the fence handle, if any, is
 * created for. May be NULL if @p_handle is NULL.
 * @dev_priv: Pointer to a device private struct.
 * @p_fence: Outputs a refcounted pointer to the created fence object.
 * @p_handle: If non-NULL, outputs a user-space handle for the fence.
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
 * user-space handle is created; otherwise no handle is created.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}
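
/*
 * A minimal sketch of the intended call pattern for the function above
 * (hypothetical; compare __vmw_execbuf_release_pinned_bo() below).
 * Because *p_fence may legitimately be NULL after return, callers can
 * fence buffers unconditionally:
 *
 *	struct vmw_fence_obj *fence;
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *	ttm_eu_fence_buffer_objects(&ticket, &validate_list,
 *				    (void *) fence);
 *	if (fence != NULL)
 *		vmw_fence_obj_unreference(&fence);
 */
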
/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
 * @sync_file: Only used to clean up in case of an error in this function.
 *
 * This function copies fence information to user-space. If copying
 * fails, the user-space struct drm_vmw_fence_rep::error member is left
 * untouched; if user-space has preloaded it with -EFAULT, the copy
 * failure can be detected there.
 * Also, if copying fails, user-space will be unable to signal the
 * fence object, so we wait for it immediately and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle,
			    int32_t out_fence_fd,
			    struct sync_file *sync_file)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		if (sync_file)
			fput(sync_file->file);

		if (fence_rep.fd != -1) {
			put_unused_fd(fence_rep.fd);
			fence_rep.fd = -1;
		}

		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
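
/*
 * Hypothetical sketch of the user-space side of the convention
 * described above (illustration only, not part of this driver):
 * preload drm_vmw_fence_rep::error with -EFAULT so that a failed
 * copy_to_user() in the kernel can be detected.
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *	struct drm_vmw_execbuf_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.fence_rep = (unsigned long) &rep;
 *	// ... fill in commands, command_size, version, etc. ...
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (rep.error != 0)
 *		;	// fence creation or the copy-out failed
 */
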
/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
 * the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch
 * pointed to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands,
				   u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
					  sw_context->dx_ctx_node->res->id);
	else
		cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (!cmd) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		return -ENOMEM;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
 * the command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       id, false, header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}
/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel-space pointer to the commands, or NULL if
 * they are only available in user-space.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function will return the value
 * of @kernel_commands on function call. That value may be NULL. In that case,
 * the value of *@header will be set to NULL.
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands,
				u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}
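
/**
 * vmw_execbuf_tie_context - Look up and reserve the DX context for a
 * command submission
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID
 * if the submission is not tied to a DX context.
 *
 * Returns 0 on success, negative error code on failure.
 */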
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource *res;
	int ret;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
					      handle, user_context_converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
			  (unsigned) handle);
		return ret;
	}

	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
	if (unlikely(ret != 0))
		goto out_err;

	sw_context->dx_ctx_node = ctx_node;
	sw_context->man = vmw_context_res_man(res);
out_err:
	vmw_resource_unreference(&res);
	return ret;
}
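
/**
 * vmw_execbuf_process - Validate, patch, submit and fence a command batch
 *
 * @file_priv: Pointer to the drm file the batch was submitted through.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space pointer to the command batch, or NULL.
 * @kernel_commands: Kernel-space pointer to the command batch, or NULL.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: Optional lag threshold to throttle on, or 0.
 * @dx_context_handle: Handle of the DX context to run the batch in.
 * @user_fence_rep: User-space address to copy fence information to.
 * @out_fence: If non-NULL, outputs a pointer to the created fence.
 * @flags: DRM_VMW_EXECBUF_FLAG_* flags.
 *
 * Returns 0 on success, negative error code on failure.
 */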
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence,
			uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct vmw_cmdbuf_header *header;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			DRM_ERROR("Failed to get a fence file descriptor.\n");
			return out_fence_fd;
		}
	}

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (ret)
			goto out_free_fence_fd;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands)) {
		ret = PTR_ERR(kernel_commands);
		goto out_free_fence_fd;
	}

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	INIT_LIST_HEAD(&resource_list);
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0)) {
		list_splice_init(&sw_context->ctx_resource_list,
				 &sw_context->resource_list);
		goto out_err_nores;
	}

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	/*
	 * Merge the resource lists before checking the return status
	 * from vmw_cmd_check_all so that all the open hash tables will
	 * be handled properly even if vmw_cmd_check_all fails.
	 */
	list_splice_init(&sw_context->ctx_resource_list,
			 &sw_context->resource_list);

	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
	 */
	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resources_unreserve(sw_context, false);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);

	/*
	 * If anything fails here, give up trying to export the fence
	 * and do a sync since the user mode will not be able to sync
	 * the fence itself. This ensures we are still functionally
	 * correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			DRM_ERROR("Unable to create sync file for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle,
				    out_fence_fd, sync_file);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resources_unreserve(sw_context, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}
4350
4351 /**
4352 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4353 *
4354 * @dev_priv: The device private structure.
4355 *
4356 * This function is called to idle the fifo and unpin the query buffer
4357 * if the normal way to do this hits an error, which should typically be
4358 * extremely rare.
4359 */
vmw_execbuf_unpin_panic(struct vmw_private * dev_priv)4360 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4361 {
4362 DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
4363
4364 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4365 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4366 if (dev_priv->dummy_query_bo_pinned) {
4367 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4368 dev_priv->dummy_query_bo_pinned = false;
4369 }
4370 }
4371
4372
4373 /**
4374 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4375 * query bo.
4376 *
4377 * @dev_priv: The device private structure.
4378 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
4379 * _after_ a query barrier that flushes all queries touching the current
4380 * buffer pointed to by @dev_priv->pinned_bo
4381 *
4382 * This function should be used to unpin the pinned query bo, or
4383 * as a query barrier when we need to make sure that all queries have
4384 * finished before the next fifo command. (For example on hardware
4385 * context destructions where the hardware may otherwise leak unfinished
4386 * queries).
4387 *
4388 * This function does not return any failure codes, but make attempts
4389 * to do safe unpinning in case of errors.
4390 *
4391 * The function will synchronize on the previous query barrier, and will
4392 * thus not finish until that barrier has executed.
4393 *
4394 * the @dev_priv->cmdbuf_mutex needs to be held by the current thread
4395 * before calling this function.
4396 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
	pinned_val.shared = false;
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
	query_val.shared = false;
	list_add_tail(&query_val.head, &validate_list);

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     false, NULL);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_bo_unreference(&dev_priv->pinned_bo);
out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}
/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
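
/**
 * vmw_execbuf_ioctl - The DRM_VMW_EXECBUF ioctl entry point
 *
 * @dev: The drm device.
 * @data: User-space address of the struct drm_vmw_execbuf_arg argument.
 * @file_priv: The calling drm file.
 * @size: Size of the ioctl argument as declared by user-space.
 *
 * Copies in the versioned argument, optionally waits on an imported
 * fence fd, and hands the command batch to vmw_execbuf_process().
 */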
int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};
	struct dma_fence *in_fence = NULL;

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;

	/*
	 * Extend the ioctl argument while maintaining backwards
	 * compatibility: we take different code paths depending on the
	 * value of arg.version.
	 */
	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;

	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		break;
	}

	/* If a fence fd was imported from elsewhere, wait on it */
	if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg.imported_fence_fd);

		if (!in_fence) {
			DRM_ERROR("Cannot get imported fence\n");
			return -EINVAL;
		}

		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
		if (ret)
			goto out;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL,
				  arg.flags);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	/* Drop any reference taken on an imported fence. */
	if (in_fence)
		dma_fence_put(in_fence);
	return ret;
}