// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "device_include/svga3d_surfacedefs.h"

#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
	(svga3d_flags & ((uint64_t)U32_MAX))
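
/*
 * For illustration (the values below are arbitrary, not part of the
 * device protocol): SVGA3D_FLAGS_64(0x1, 0x2) yields the 64-bit value
 * 0x0000000100000002ULL, and SVGA3D_FLAGS_UPPER_32() /
 * SVGA3D_FLAGS_LOWER_32() recover 0x1 and 0x2 from it again.
 */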

/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime:          The TTM prime object handling user-space visibility.
 * @srf:            The surface metadata.
 * @size:           TTM accounting size for the surface.
 * @master:         Master of the creating client. Used for security check.
 * @backup_base:    The TTM base object of the backup buffer, if any.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	uint32_t size;
	struct drm_master *master;
	struct ttm_base_object *backup_base;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:           Surface face.
 * @mip:            Mip level.
 * @bo_offset:      Offset into backing store of this mip level.
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
			       struct drm_vmw_gb_surface_create_ext_req *req,
			       struct drm_vmw_gb_surface_create_rep *rep,
			       struct drm_file *file_priv);
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
				  struct drm_vmw_surface_arg *req,
				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
				  struct drm_file *file_priv);

static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;


static uint64_t vmw_user_surface_size;

static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.prio = 1,
	.dirty_prio = 1,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,
	.may_evict = true,
	.prio = 1,
	.dirty_prio = 2,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind
};

/**
 * struct vmw_surface_dma - SVGA3D DMA command
 *
 * @header: SVGA3D command header.
 * @body:   DMA command body.
 * @cb:     Copy box describing the region to transfer.
 * @suffix: DMA command suffix.
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 *
 * @header: SVGA3D command header.
 * @body:   Surface define command body.
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 *
 * @header: SVGA3D command header.
 * @body:   Surface destroy command body.
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};


/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}


/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	/*
	 * Downcast of surfaceFlags, which was upcast when received from
	 * user-space, since the driver internally stores it as 64 bit.
	 * The legacy surface define command only supports 32-bit flags.
	 */
	cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->flags;
	cmd->body.format = srf->format;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(srf->format);

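	/*
	 * Emit one self-contained DMA packet (header, body, copy box and
	 * suffix) per face/mip-level image of the surface.
	 */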
	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
								  cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			svga3dsurface_get_image_buffer_size(desc, cur_size,
							    body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}


/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res:        Pointer to a struct vmw_resource embedded in a struct
 *              vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	void *cmd;

	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {

		cmd = VMW_FIFO_RESERVE(dev_priv, vmw_surface_destroy_size());
		if (unlikely(!cmd))
			return;

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * TODO: Make used_memory_size atomic, or use a separate
		 * lock, to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		srf = vmw_res_to_srf(res);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, allocates one and defines the
 * surface on the device.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode the surface define command.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);
	vmw_fifo_resource_inc(dev_priv);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 * @bind:           Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(!val_buf->bo);
	submit_size = vmw_surface_dma_size(srf);
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 *                       surface validation process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->backup_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}


/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 *                         surface eviction process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @readback:       Boolean whether to copy back the surface contents.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data from the surface if @readback
 * is true.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 *                          resource eviction process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the surface destroy command.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}


/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv:       Pointer to a device private struct.
 * @srf:            Pointer to the struct vmw_surface to initialize.
 * @res_free:       Pointer to a resource destructor used to free
 *                  the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(!res_free);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	INIT_LIST_HEAD(&srf->view_list);
	res->hw_destroy = vmw_hw_surface_destroy;
	return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 *                                user visible surfaces
 *
 * @base:           Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	if (user_srf->master)
		drm_master_put(&user_srf->master);
	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	ttm_prime_object_kfree(user_srf, prime);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 *                                 destructor
 *
 * @p_base:         Pointer to a pointer to a TTM base object
 *                  embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	if (user_srf->backup_base)
		ttm_base_object_unref(&user_srf->backup_base);
	vmw_resource_unreference(&res);
}

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 *                             the user surface destroy functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 *                            the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	uint32_t size;
	const struct svga3d_surface_desc *desc;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
			return -EINVAL;
		num_sizes += req->mip_levels[i];
	}

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
	    num_sizes == 0)
		return -EINVAL;

	size = vmw_user_surface_size +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
			       req->format);
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, &ctx);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(!user_srf)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	/* The driver internally stores the flags as 64 bit. */
	srf->flags = (SVGA3dSurfaceAllFlags)req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;
	srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
				 req->size_addr,
				 sizeof(*srf->sizes) * srf->num_sizes);
	if (IS_ERR(srf->sizes)) {
		ret = PTR_ERR(srf->sizes);
		goto out_no_sizes;
	}
	srf->offsets = kmalloc_array(srf->num_sizes,
				     sizeof(*srf->offsets),
				     GFP_KERNEL);
	if (unlikely(!srf->offsets)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	srf->base_size = *srf->sizes;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = 0;
	srf->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	srf->quality_level = SVGA3D_MS_QUALITY_NONE;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride = svga3dsurface_calculate_pitch
				(desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += svga3dsurface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
	res->backup_size = cur_bo_offset;
	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (!srf->snooper.image) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}

	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_master_get(file_priv->master);

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	/*
	 * A gb-aware client referencing a shared surface will
	 * expect a backup buffer to be present.
	 */
	if (dev_priv->has_mob && req->shareable) {
		uint32_t backup_handle;

		ret = vmw_user_bo_alloc(dev_priv, tfile,
					res->backup_size,
					true,
					&backup_handle,
					&res->backup,
					&user_srf->backup_base);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
	}

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->shareable, VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->prime.base.handle;
	vmw_resource_unreference(&res);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_no_sizes:
	ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}


static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
			     struct drm_file *file_priv,
			     uint32_t u_handle,
			     enum drm_vmw_handle_type handle_type,
			     struct ttm_base_object **base_p)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_surface *user_srf;
	uint32_t handle;
	struct ttm_base_object *base;
	int ret;
	bool require_exist = false;

	if (handle_type == DRM_VMW_HANDLE_PRIME) {
		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
		if (unlikely(ret != 0))
			return ret;
	} else {
		if (unlikely(drm_is_render_client(file_priv)))
			require_exist = true;

		handle = u_handle;
	}

	ret = -EINVAL;
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
	if (unlikely(!base)) {
		VMW_DEBUG_USER("Could not find surface to reference.\n");
		goto out_no_lookup;
	}

	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
		VMW_DEBUG_USER("Referenced object is not a surface.\n");
		goto out_bad_resource;
	}

	if (handle_type != DRM_VMW_HANDLE_PRIME) {
		user_srf = container_of(base, struct vmw_user_surface,
					prime.base);

		/*
		 * Make sure the surface creator has the same
		 * authenticating master, or is already registered with us.
		 */
		if (drm_is_primary_client(file_priv) &&
		    user_srf->master != file_priv->master)
			require_exist = true;

		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
					 require_exist);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not add a reference to a surface.\n");
			goto out_bad_resource;
		}
	}

	*base_p = base;
	return 0;

out_bad_resource:
	ttm_base_object_unref(&base);
out_no_lookup:
	if (handle_type == DRM_VMW_HANDLE_PRIME)
		(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);

	return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 *                               the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	/* Downcast of flags when sending back to user space */
	rep->flags = (uint32_t)srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->base_size,
				   sizeof(srf->base_size));
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
			       srf->num_sizes);
		ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
		ret = -EFAULT;
	}

	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_gb_surface_create - Encode a surface define command and submit it
 * to the device, as part of the resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint32_t cmd_len, cmd_id, submit_len;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface body;
	} *cmd;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v2 body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v3 body;
	} *cmd3;

	if (likely(res->id != -1))
		return 0;

	vmw_fifo_resource_inc(dev_priv);
	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	if (dev_priv->has_sm4_1 && srf->array_size > 0) {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
		cmd_len = sizeof(cmd3->body);
		submit_len = sizeof(*cmd3);
	} else if (srf->array_size > 0) {
		/* has_dx checked on creation time. */
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
		cmd_len = sizeof(cmd2->body);
		submit_len = sizeof(*cmd2);
	} else {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
		cmd_len = sizeof(cmd->body);
		submit_len = sizeof(*cmd);
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, submit_len);
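	/*
	 * cmd2 and cmd3 alias the same FIFO reservation as cmd; only the
	 * variant selected above is actually encoded and submitted.
	 */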
	cmd2 = (typeof(cmd2))cmd;
	cmd3 = (typeof(cmd3))cmd;
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	if (dev_priv->has_sm4_1 && srf->array_size > 0) {
		cmd3->header.id = cmd_id;
		cmd3->header.size = cmd_len;
		cmd3->body.sid = srf->res.id;
		cmd3->body.surfaceFlags = srf->flags;
		cmd3->body.format = srf->format;
		cmd3->body.numMipLevels = srf->mip_levels[0];
		cmd3->body.multisampleCount = srf->multisample_count;
		cmd3->body.multisamplePattern = srf->multisample_pattern;
		cmd3->body.qualityLevel = srf->quality_level;
		cmd3->body.autogenFilter = srf->autogen_filter;
		cmd3->body.size.width = srf->base_size.width;
		cmd3->body.size.height = srf->base_size.height;
		cmd3->body.size.depth = srf->base_size.depth;
		cmd3->body.arraySize = srf->array_size;
	} else if (srf->array_size > 0) {
		cmd2->header.id = cmd_id;
		cmd2->header.size = cmd_len;
		cmd2->body.sid = srf->res.id;
		cmd2->body.surfaceFlags = srf->flags;
		cmd2->body.format = srf->format;
		cmd2->body.numMipLevels = srf->mip_levels[0];
		cmd2->body.multisampleCount = srf->multisample_count;
		cmd2->body.autogenFilter = srf->autogen_filter;
		cmd2->body.size.width = srf->base_size.width;
		cmd2->body.size.height = srf->base_size.height;
		cmd2->body.size.depth = srf->base_size.depth;
		cmd2->body.arraySize = srf->array_size;
	} else {
		cmd->header.id = cmd_id;
		cmd->header.size = cmd_len;
		cmd->body.sid = srf->res.id;
		cmd->body.surfaceFlags = srf->flags;
		cmd->body.format = srf->format;
		cmd->body.numMipLevels = srf->mip_levels[0];
		cmd->body.multisampleCount = srf->multisample_count;
		cmd->body.autogenFilter = srf->autogen_filter;
		cmd->body.size.width = srf->base_size.width;
		cmd->body.size.height = srf->base_size.height;
		cmd->body.size.depth = srf->base_size.depth;
	}

	vmw_fifo_commit(dev_priv, submit_len);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	vmw_fifo_resource_dec(dev_priv);
	return ret;
}

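/**
 * vmw_gb_surface_bind - Bind a guest-backed surface to its backup MOB,
 * issuing an update of the surface contents from the backup buffer if
 * the backup is dirty.
 *
 * @res:     Pointer to a struct vmw_resource embedded in a struct
 *           vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 *           information about the backup buffer.
 */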
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd2;
	uint32_t submit_size;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);

	cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd1))
		return -ENOMEM;

	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.sid = res->id;
	cmd1->body.mobid = bo->mem.start;
	if (res->backup_dirty) {
		cmd2 = (void *) &cmd1[1];
		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		res->backup_dirty = false;
	}
	vmw_fifo_commit(dev_priv, submit_size);

	return 0;
}

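/**
 * vmw_gb_surface_unbind - Unbind a guest-backed surface from its backup
 * MOB as part of the eviction process, optionally reading back the
 * surface contents first.
 *
 * @res:      Pointer to a struct vmw_resource embedded in a struct
 *            vmw_surface.
 * @readback: Boolean whether to read back the surface contents before
 *            unbinding.
 * @val_buf:  Pointer to a struct ttm_validate_buffer containing
 *            information about the backup buffer.
 */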
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd3;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

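	/*
	 * Submit either a readback or an invalidate command, followed by
	 * a bind to SVGA3D_INVALID_ID that detaches the MOB from the
	 * surface.
	 */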
	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.sid = res->id;
		cmd3 = (void *) &cmd1[1];
	} else {
		cmd2 = (void *) cmd;
		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		cmd3 = (void *) &cmd2[1];
	}

	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd3->header.size = sizeof(cmd3->body);
	cmd3->body.sid = res->id;
	cmd3->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

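/**
 * vmw_gb_surface_destroy - Scrub views and bindings referencing the
 * surface, then destroy the device surface and release its id.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 *       vmw_surface.
 */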
static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBSurface body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
	vmw_binding_res_list_scrub(&res->binding_head);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(!cmd)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}


/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_arg *arg =
	    (union drm_vmw_gb_surface_create_arg *)data;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_create_ext_req req_ext;

	req_ext.base = arg->req;
	req_ext.version = drm_vmw_gb_surface_v1;
	req_ext.svga3d_flags_upper_32_bits = 0;
	req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
	req_ext.must_be_zero = 0;

	return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
}

/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_arg *arg =
	    (union drm_vmw_gb_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
	int ret;

	ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);

	if (unlikely(ret != 0))
		return ret;

	rep->creq = rep_ext.creq.base;
	rep->crep = rep_ext.crep;

	return ret;
}


/**
 * vmw_surface_gb_priv_define - Define a private GB surface
 *
 * @dev:  Pointer to a struct drm_device
 * @user_accounting_size:  Used to track user-space memory usage, set
 *                         to 0 for kernel mode only memory
 * @svga3d_flags: SVGA3d surface flags for the device
 * @format: requested surface format
 * @for_scanout: true if intended to be used for scanout buffer
 * @num_mip_levels:  number of MIP levels
 * @multisample_count: multisample count
 * @array_size: Surface array size.
 * @size: width, height, depth of the surface requested
 * @multisample_pattern: Multisampling pattern when msaa is supported
 * @quality_level: Precision settings
 * @srf_out: allocated surface.  Set to NULL on failure.
 *
 * GB surfaces allocated by this function will not have a user mode handle, and
 * thus will only be visible to vmwgfx.  For optimization reasons the
 * surface may later be given a user mode handle by another function to make
 * it available to user mode drivers.
 */
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       SVGA3dSurfaceAllFlags svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       SVGA3dMSPattern multisample_pattern,
			       SVGA3dMSQualityLevel quality_level,
			       struct vmw_surface **srf_out)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_surface *srf;
	int ret;
	u32 num_layers = 1;
	u32 sample_count = 1;

	*srf_out = NULL;

	if (for_scanout) {
		if (!svga3dsurface_is_screen_target_format(format)) {
			VMW_DEBUG_USER("Invalid Screen Target surface format.");
			return -EINVAL;
		}

		if (size.width > dev_priv->texture_max_width ||
		    size.height > dev_priv->texture_max_height) {
			VMW_DEBUG_USER("%ux%u, exceeds max surface size %ux%u\n",
				       size.width, size.height,
				       dev_priv->texture_max_width,
				       dev_priv->texture_max_height);
			return -EINVAL;
		}
	} else {
		const struct svga3d_surface_desc *desc;

		desc = svga3dsurface_get_desc(format);
		if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
			VMW_DEBUG_USER("Invalid surface format.\n");
			return -EINVAL;
		}
	}

	/* array_size must be zero for a non-GL3 host. */
	if (array_size > 0 && !dev_priv->has_dx) {
		VMW_DEBUG_USER("Tried to create DX surface on non-DX host.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   user_accounting_size, &ctx);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(!user_srf)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	*srf_out  = &user_srf->srf;
	user_srf->size = user_accounting_size;
	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile     = NULL;

	srf = &user_srf->srf;
	srf->flags             = svga3d_flags;
	srf->format            = format;
	srf->scanout           = for_scanout;
	srf->mip_levels[0]     = num_mip_levels;
	srf->num_sizes         = 1;
	srf->sizes             = NULL;
	srf->offsets           = NULL;
	srf->base_size         = size;
	srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE;
	srf->array_size        = array_size;
	srf->multisample_count = multisample_count;
	srf->multisample_pattern = multisample_pattern;
	srf->quality_level = quality_level;

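	/*
	 * Number of content layers: texture arrays use the array size,
	 * while cube maps have one layer per face.
	 */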
	if (array_size)
		num_layers = array_size;
	else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers = SVGA3D_MAX_SURFACE_FACES;

	if (srf->flags & SVGA3D_SURFACE_MULTISAMPLE)
		sample_count = srf->multisample_count;

	srf->res.backup_size   =
		svga3dsurface_get_serialized_size_extended(srf->format,
							   srf->base_size,
							   srf->mip_levels[0],
							   num_layers,
							   sample_count);

	if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
		srf->res.backup_size += sizeof(SVGA3dDXSOState);

	/*
	 * Don't set the SVGA3D_SURFACE_SCREENTARGET flag for a scanout
	 * surface with a size greater than the STDU max width/height.
	 * This is really a workaround to support creation of a big
	 * framebuffer requested by some user-space applications for the
	 * whole topology. That big framebuffer won't really be used for
	 * binding with a screen target, as a separate surface is created
	 * during prepare_fb, so it's safe to drop the flag here.
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_target &&
	    for_scanout && size.width <= dev_priv->stdu_max_width &&
	    size.height <= dev_priv->stdu_max_height)
		srf->flags |= SVGA3D_SURFACE_SCREENTARGET;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */
	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;

out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);

out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_ext_arg *arg =
	    (union drm_vmw_gb_surface_create_ext_arg *)data;
	struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;

	return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
}

/**
 * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_ext_arg *arg =
	    (union drm_vmw_gb_surface_reference_ext_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;

	return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
}

/**
 * vmw_gb_surface_define_internal - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @req: Request argument from user-space.
 * @rep: Response argument to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
			       struct drm_vmw_gb_surface_create_ext_req *req,
			       struct drm_vmw_gb_surface_create_rep *rep,
			       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint32_t size;
	uint32_t backup_handle = 0;
	SVGA3dSurfaceAllFlags svga3d_flags_64 =
		SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
				req->base.svga3d_flags);

	if (!dev_priv->has_sm4_1) {
		/*
		 * If SM4_1 is not supported then we cannot send 64-bit
		 * flags to the device.
		 */
		if (req->svga3d_flags_upper_32_bits != 0)
			return -EINVAL;

		if (req->base.multisample_count != 0)
			return -EINVAL;

		if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
			return -EINVAL;

		if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
			return -EINVAL;
	}

	if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
	    req->base.multisample_count == 0)
		return -EINVAL;

	if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS)
		return -EINVAL;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

	size = vmw_user_surface_size;

	/* Define a surface based on the parameters. */
	ret = vmw_surface_gb_priv_define(dev,
					 size,
					 svga3d_flags_64,
					 req->base.format,
					 req->base.drm_surface_flags &
					 drm_vmw_surface_flag_scanout,
					 req->base.mip_levels,
					 req->base.multisample_count,
					 req->base.array_size,
					 req->base.base_size,
					 req->multisample_pattern,
					 req->quality_level,
					 &srf);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(srf, struct vmw_user_surface, srf);
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_master_get(file_priv->master);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	res = &user_srf->srf.res;

	if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle,
					 &res->backup,
					 &user_srf->backup_base);
		if (ret == 0) {
			if (res->backup->base.num_pages * PAGE_SIZE <
			    res->backup_size) {
				VMW_DEBUG_USER("Surface backup buffer too small.\n");
				vmw_bo_unreference(&res->backup);
				ret = -EINVAL;
				goto out_unlock;
			} else {
				backup_handle = req->base.buffer_handle;
			}
		}
	} else if (req->base.drm_surface_flags &
		   drm_vmw_surface_flag_create_buffer)
		ret = vmw_user_bo_alloc(dev_priv, tfile,
					res->backup_size,
					req->base.drm_surface_flags &
					drm_vmw_surface_flag_shareable,
					&backup_handle,
					&res->backup,
					&user_srf->backup_base);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	tmp = vmw_resource_reference(res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->base.drm_surface_flags &
				    drm_vmw_surface_flag_shareable,
				    VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->handle      = user_srf->prime.base.handle;
	rep->backup_size = res->backup_size;
	if (res->backup) {
		rep->buffer_map_handle =
			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
		rep->buffer_handle = backup_handle;
	} else {
		rep->buffer_map_handle = 0;
		rep->buffer_size = 0;
		rep->buffer_handle = SVGA3D_INVALID_ID;
	}

	vmw_resource_unreference(&res);

out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_gb_surface_reference_internal - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @req: Pointer to user-space request surface arg.
 * @rep: Pointer to response to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
				  struct drm_vmw_surface_arg *req,
				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
				  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	uint32_t backup_handle;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;
	if (!srf->res.backup) {
		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
		ret = -EINVAL;
		goto out_bad_resource;
	}

	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
	ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a GB surface "
			  "backup buffer.\n");
		(void) ttm_ref_object_base_unref(tfile, base->handle,
						 TTM_REF_USAGE);
		goto out_bad_resource;
	}

	rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(srf->flags);
	rep->creq.base.format = srf->format;
	rep->creq.base.mip_levels = srf->mip_levels[0];
	rep->creq.base.drm_surface_flags = 0;
	rep->creq.base.multisample_count = srf->multisample_count;
	rep->creq.base.autogen_filter = srf->autogen_filter;
	rep->creq.base.array_size = srf->array_size;
	rep->creq.base.buffer_handle = backup_handle;
	rep->creq.base.base_size = srf->base_size;
	rep->crep.handle = user_srf->prime.base.handle;
	rep->crep.backup_size = srf->res.backup_size;
	rep->crep.buffer_handle = backup_handle;
	rep->crep.buffer_map_handle =
		drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;

	rep->creq.version = drm_vmw_gb_surface_v1;
	rep->creq.svga3d_flags_upper_32_bits =
		SVGA3D_FLAGS_UPPER_32(srf->flags);
	rep->creq.multisample_pattern = srf->multisample_pattern;
	rep->creq.quality_level = srf->quality_level;
	rep->creq.must_be_zero = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}