// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "vmwgfx_drv.h"

#include "vmwgfx_bo.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_devcaps.h"
#include "vmwgfx_mksstat.h"
#include "ttm_object.h"

#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_module.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_placement.h>
#include <generated/utsrelease.h>

#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif
#include <linux/cc_platform.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>

#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"

/*
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		 struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
		 struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
		 struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT,	\
		union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT,		\
		union drm_vmw_gb_surface_reference_ext_arg)
#define DRM_IOCTL_VMW_MSG						\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG,			\
		struct drm_vmw_msg_arg)
#define DRM_IOCTL_VMW_MKSSTAT_RESET				\
	DRM_IO(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_RESET)
#define DRM_IOCTL_VMW_MKSSTAT_ADD				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_ADD,	\
		struct drm_vmw_mksstat_add_arg)
#define DRM_IOCTL_VMW_MKSSTAT_REMOVE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_REMOVE,	\
		struct drm_vmw_mksstat_remove_arg)

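/*
 * A minimal sketch of how one of these encodings is consumed from user
 * space, assuming the uapi header <drm/vmwgfx_drm.h> and an already opened
 * device fd (hypothetical example code, not part of the driver):
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *
 *	if (ioctl(fd, DRM_IOCTL_VMW_GET_PARAM, &arg) == 0)
 *		printf("3D enabled: %llu\n", (unsigned long long)arg.value);
 *
 * The encoded number carries the transfer direction and argument size,
 * which vmw_generic_ioctl() below verifies against the table entry.
 */
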
/*
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_gem_object_create_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CURSOR_BYPASS,
			  vmw_kms_cursor_bypass_ioctl,
			  DRM_MASTER),

	DRM_IOCTL_DEF_DRV(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
			  DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
			  DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
			  DRM_MASTER),

	DRM_IOCTL_DEF_DRV(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_EXECBUF, vmw_execbuf_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_SIGNALED,
			  vmw_fence_obj_signaled_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
			  DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffer; mark as master only. */
	DRM_IOCTL_DEF_DRV(VMW_PRESENT, vmw_present_ioctl,
			  DRM_MASTER | DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VMW_PRESENT_READBACK,
			  vmw_present_readback_ioctl,
			  DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	DRM_IOCTL_DEF_DRV(VMW_UPDATE_LAYOUT,
			  vmw_kms_update_layout_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CREATE_SHADER,
			  vmw_shader_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_SHADER,
			  vmw_shader_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE,
			  vmw_gb_surface_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF,
			  vmw_gb_surface_reference_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_SYNCCPU,
			  vmw_user_bo_synccpu_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CREATE_EXTENDED_CONTEXT,
			  vmw_extended_context_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE_EXT,
			  vmw_gb_surface_define_ext_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF_EXT,
			  vmw_gb_surface_reference_ext_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MSG,
			  vmw_msg_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_RESET,
			  vmw_mksstat_reset_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_ADD,
			  vmw_mksstat_add_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_REMOVE,
			  vmw_mksstat_remove_ioctl,
			  DRM_RENDER_ALLOW),
};

static const struct pci_device_id vmw_pci_id_list[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA2) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA3) },
	{ }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);

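/*
 * Illustrative module load invocation (an assumption for documentation,
 * not taken from this file; the module is built as vmwgfx.ko and the 0600
 * permissions above also expose the parameters under
 * /sys/module/vmwgfx/parameters/):
 *
 *	modprobe vmwgfx force_coherent=1 restrict_dma_mask=1
 */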

struct bitmap_name {
	uint32 value;
	const char *name;
};

static const struct bitmap_name cap1_names[] = {
	{ SVGA_CAP_RECT_COPY, "rect copy" },
	{ SVGA_CAP_CURSOR, "cursor" },
	{ SVGA_CAP_CURSOR_BYPASS, "cursor bypass" },
	{ SVGA_CAP_CURSOR_BYPASS_2, "cursor bypass 2" },
	{ SVGA_CAP_8BIT_EMULATION, "8bit emulation" },
	{ SVGA_CAP_ALPHA_CURSOR, "alpha cursor" },
	{ SVGA_CAP_3D, "3D" },
	{ SVGA_CAP_EXTENDED_FIFO, "extended fifo" },
	{ SVGA_CAP_MULTIMON, "multimon" },
	{ SVGA_CAP_PITCHLOCK, "pitchlock" },
	{ SVGA_CAP_IRQMASK, "irq mask" },
	{ SVGA_CAP_DISPLAY_TOPOLOGY, "display topology" },
	{ SVGA_CAP_GMR, "gmr" },
	{ SVGA_CAP_TRACES, "traces" },
	{ SVGA_CAP_GMR2, "gmr2" },
	{ SVGA_CAP_SCREEN_OBJECT_2, "screen object 2" },
	{ SVGA_CAP_COMMAND_BUFFERS, "command buffers" },
	{ SVGA_CAP_CMD_BUFFERS_2, "command buffers 2" },
	{ SVGA_CAP_GBOBJECTS, "gbobject" },
	{ SVGA_CAP_DX, "dx" },
	{ SVGA_CAP_HP_CMD_QUEUE, "hp cmd queue" },
	{ SVGA_CAP_NO_BB_RESTRICTION, "no bb restriction" },
	{ SVGA_CAP_CAP2_REGISTER, "cap2 register" },
};


static const struct bitmap_name cap2_names[] = {
	{ SVGA_CAP2_GROW_OTABLE, "grow otable" },
	{ SVGA_CAP2_INTRA_SURFACE_COPY, "intra surface copy" },
	{ SVGA_CAP2_DX2, "dx2" },
	{ SVGA_CAP2_GB_MEMSIZE_2, "gb memsize 2" },
	{ SVGA_CAP2_SCREENDMA_REG, "screendma reg" },
	{ SVGA_CAP2_OTABLE_PTDEPTH_2, "otable ptdepth2" },
	{ SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT, "non ms to ms stretchblt" },
	{ SVGA_CAP2_CURSOR_MOB, "cursor mob" },
	{ SVGA_CAP2_MSHINT, "mshint" },
	{ SVGA_CAP2_CB_MAX_SIZE_4MB, "cb max size 4mb" },
	{ SVGA_CAP2_DX3, "dx3" },
	{ SVGA_CAP2_FRAME_TYPE, "frame type" },
	{ SVGA_CAP2_COTABLE_COPY, "cotable copy" },
	{ SVGA_CAP2_TRACE_FULL_FB, "trace full fb" },
	{ SVGA_CAP2_EXTRA_REGS, "extra regs" },
	{ SVGA_CAP2_LO_STAGING, "lo staging" },
};

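/**
 * vmw_print_bitmap - Log the names of the bits set in a capability bitmap.
 *
 * @drm: Pointer to the drm device.
 * @prefix: Label to print in front of the decoded names.
 * @bitmap: The bitmap to decode.
 * @bnames: Table mapping individual bits to human-readable names.
 * @num_names: Number of entries in @bnames.
 *
 * Bits without a matching table entry are reported at debug level.
 */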
static void vmw_print_bitmap(struct drm_device *drm,
			     const char *prefix, uint32_t bitmap,
			     const struct bitmap_name *bnames,
			     uint32_t num_names)
{
	char buf[512];
	uint32_t i;
	uint32_t offset = 0;
	for (i = 0; i < num_names; ++i) {
		if ((bitmap & bnames[i].value) != 0) {
			offset += snprintf(buf + offset,
					   ARRAY_SIZE(buf) - offset,
					   "%s, ", bnames[i].name);
			bitmap &= ~bnames[i].value;
		}
	}

	drm_info(drm, "%s: %s\n", prefix, buf);
	if (bitmap != 0)
		drm_dbg(drm, "%s: unknown enums: %x\n", prefix, bitmap);
}


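/**
 * vmw_print_sm_type - Log the highest shader model available on the device.
 *
 * @dev_priv: Pointer to a struct vmw_private.
 */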
static void vmw_print_sm_type(struct vmw_private *dev_priv)
{
	static const char *names[] = {
		[VMW_SM_LEGACY] = "Legacy",
		[VMW_SM_4] = "SM4",
		[VMW_SM_4_1] = "SM4_1",
		[VMW_SM_5] = "SM_5",
		[VMW_SM_5_1X] = "SM_5_1X",
		[VMW_SM_MAX] = "Invalid"
	};
	BUILD_BUG_ON(ARRAY_SIZE(names) != (VMW_SM_MAX + 1));
	drm_info(&dev_priv->drm, "Available shader model: %s.\n",
		 names[dev_priv->sm_type]);
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_bo *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;
	struct vmw_bo_params bo_params = {
		.domain = VMW_BO_DOMAIN_SYS,
		.busy_domain = VMW_BO_DOMAIN_SYS,
		.bo_type = ttm_bo_type_kernel,
		.size = PAGE_SIZE,
		.pin = true
	};

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->tbo);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

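/**
 * vmw_device_init - Enable the device and set up the FIFO.
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * Saves the enable, config-done and traces register state for restoration
 * in vmw_device_fini(), enables SVGA in hidden mode, enables framebuffer
 * traces when command buffers are not available, and creates the FIFO.
 *
 * Returns 0 on success or a negative error code on failure.
 */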
static int vmw_device_init(struct vmw_private *dev_priv)
{
	bool uses_fb_traces = false;

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
		  SVGA_REG_ENABLE_HIDE);

	uses_fb_traces = !vmw_cmd_supported(dev_priv) &&
			 (dev_priv->capabilities & SVGA_CAP_TRACES) != 0;

	vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces);
	dev_priv->fifo = vmw_fifo_create(dev_priv);
	if (IS_ERR(dev_priv->fifo)) {
		int err = PTR_ERR(dev_priv->fifo);
		dev_priv->fifo = NULL;
		return err;
	} else if (!dev_priv->fifo) {
		vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	}

	dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	return 0;
}

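/**
 * vmw_device_fini - Disable the device and restore saved register state.
 *
 * @vmw: Pointer to a struct vmw_private.
 *
 * Waits for the device to go idle using legacy sync, then restores the
 * register state saved in vmw_device_init() and destroys the FIFO.
 */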
static void vmw_device_fini(struct vmw_private *vmw)
{
	/*
	 * Legacy sync
	 */
	vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
		;

	vmw->last_read_seqno = vmw_fence_read(vmw);

	vmw_write(vmw, SVGA_REG_CONFIG_DONE,
		  vmw->config_done_state);
	vmw_write(vmw, SVGA_REG_ENABLE,
		  vmw->enable_state);
	vmw_write(vmw, SVGA_REG_TRACES,
		  vmw->traces_state);

	vmw_fifo_destroy(vmw);
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

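/**
 * vmw_request_device - Set up the device for command submission.
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * Initializes the device, the fence manager FIFO, the command buffer
 * manager (falling back to the legacy shader model when unavailable),
 * otables and the dummy query bo. On failure, everything set up here is
 * torn down again and a negative error code is returned.
 */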
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_device_init(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize the device.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->sm_type = VMW_SM_LEGACY;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_device_fini(dev_priv);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_device_fini(dev_priv);
}

/*
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMWGFX_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMWGFX_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMWGFX_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMWGFX_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width  = VMWGFX_MIN_INITIAL_WIDTH;
		height = VMWGFX_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine what actions need to be taken by the
 * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Caching DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};

	/* TTM currently doesn't fully support SEV encryption. */
	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return -EINVAL;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
	else if (vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;
	else
		dev_priv->map_mode = vmw_dma_map_populate;

	drm_info(&dev_priv->drm,
		 "DMA map mode: %s\n", names[dev_priv->map_mode]);
	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * With 32-bit we can only handle 32-bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
	if (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask) {
		drm_info(&dev_priv->drm,
			 "Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
	}

	return ret;
}

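/**
 * vmw_vram_manager_init - Create the TTM range manager for VRAM.
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * The manager is created in the disabled state; it is switched on when
 * SVGA mode is enabled in __vmw_svga_enable().
 */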
static int vmw_vram_manager_init(struct vmw_private *dev_priv)
{
	int ret;
	ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
				 dev_priv->vram_size >> PAGE_SHIFT);
	ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
	return ret;
}

static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
{
	ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
}

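/**
 * vmw_setup_pci_resources - Claim and map the PCI BARs of the device.
 *
 * @dev: Pointer to a struct vmw_private.
 * @pci_id: PCI device id, which determines the BAR layout.
 *
 * On SVGA3 devices, BAR 0 holds the MMIO registers and BAR 2 the VRAM.
 * On SVGA2 devices, BAR 0 is the I/O port region, BAR 1 the VRAM and
 * BAR 2 the FIFO. Returns 0 on success or a negative error code.
 */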
static int vmw_setup_pci_resources(struct vmw_private *dev,
				   u32 pci_id)
{
	resource_size_t rmmio_start;
	resource_size_t rmmio_size;
	resource_size_t fifo_start;
	resource_size_t fifo_size;
	int ret;
	struct pci_dev *pdev = to_pci_dev(dev->drm.dev);

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, "vmwgfx probe");
	if (ret)
		return ret;

	dev->pci_id = pci_id;
	if (pci_id == VMWGFX_PCI_ID_SVGA3) {
		rmmio_start = pci_resource_start(pdev, 0);
		rmmio_size = pci_resource_len(pdev, 0);
		dev->vram_start = pci_resource_start(pdev, 2);
		dev->vram_size = pci_resource_len(pdev, 2);

		drm_info(&dev->drm,
			 "Register MMIO at 0x%pa size is %llu kiB\n",
			 &rmmio_start, (uint64_t)rmmio_size / 1024);
		dev->rmmio = devm_ioremap(dev->drm.dev,
					  rmmio_start,
					  rmmio_size);
		if (!dev->rmmio) {
			drm_err(&dev->drm,
				"Failed mapping registers mmio memory.\n");
			pci_release_regions(pdev);
			return -ENOMEM;
		}
	} else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
		dev->io_start = pci_resource_start(pdev, 0);
		dev->vram_start = pci_resource_start(pdev, 1);
		dev->vram_size = pci_resource_len(pdev, 1);
		fifo_start = pci_resource_start(pdev, 2);
		fifo_size = pci_resource_len(pdev, 2);

		drm_info(&dev->drm,
			 "FIFO at %pa size is %llu kiB\n",
			 &fifo_start, (uint64_t)fifo_size / 1024);
		dev->fifo_mem = devm_memremap(dev->drm.dev,
					      fifo_start,
					      fifo_size,
					      MEMREMAP_WB);

		if (IS_ERR(dev->fifo_mem)) {
			drm_err(&dev->drm,
				"Failed mapping FIFO memory.\n");
			pci_release_regions(pdev);
			return PTR_ERR(dev->fifo_mem);
		}
	} else {
		pci_release_regions(pdev);
		return -EINVAL;
	}

	/*
	 * This is the approximate size of the VRAM; the exact size will only
	 * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource
	 * size will be equal to or bigger than the size reported by
	 * SVGA_REG_VRAM_SIZE.
	 */
	drm_info(&dev->drm,
		 "VRAM at %pa size is %llu kiB\n",
		 &dev->vram_start, (uint64_t)dev->vram_size / 1024);

	return 0;
}

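/**
 * vmw_detect_version - Negotiate the SVGA device version.
 *
 * @dev: Pointer to a struct vmw_private.
 *
 * Writes the highest id the driver supports to SVGA_REG_ID and reads back
 * the id the device accepted. Returns 0 on success, or -ENOSYS if the
 * device supports neither SVGA_ID_2 nor SVGA_ID_3.
 */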
static int vmw_detect_version(struct vmw_private *dev)
{
	uint32_t svga_id;

	vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ?
			  SVGA_ID_3 : SVGA_ID_2);
	svga_id = vmw_read(dev, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) {
		drm_err(&dev->drm,
			"Unsupported SVGA ID 0x%x on chipset 0x%x\n",
			svga_id, dev->pci_id);
		return -ENOSYS;
	}
	BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3));
	drm_info(&dev->drm,
		 "Running on SVGA version %d.\n", (svga_id & 0xff));
	return 0;
}

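/**
 * vmw_write_driver_id - Report the guest driver identity to the host.
 *
 * @dev: Pointer to a struct vmw_private.
 *
 * On devices exposing SVGA_CAP2_DX2, writes the Linux kernel version and
 * the vmwgfx driver version to the guest driver id registers.
 */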
static void vmw_write_driver_id(struct vmw_private *dev)
{
	if ((dev->capabilities2 & SVGA_CAP2_DX2) != 0) {
		vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID,
			  SVGA_REG_GUEST_DRIVER_ID_LINUX);

		vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION1,
			  LINUX_VERSION_MAJOR << 24 |
			  LINUX_VERSION_PATCHLEVEL << 16 |
			  LINUX_VERSION_SUBLEVEL);
		vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION2,
			  VMWGFX_DRIVER_MAJOR << 24 |
			  VMWGFX_DRIVER_MINOR << 16 |
			  VMWGFX_DRIVER_PATCHLEVEL);
		vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION3, 0);

		vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID,
			  SVGA_REG_GUEST_DRIVER_ID_SUBMIT);
	}
}

static void vmw_sw_context_init(struct vmw_private *dev_priv)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;

	hash_init(sw_context->res_ht);
}

static void vmw_sw_context_fini(struct vmw_private *dev_priv)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;

	vfree(sw_context->cmd_bounce);
	if (sw_context->staged_bindings)
		vmw_binding_state_free(sw_context->staged_bindings);
}

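/**
 * vmw_driver_load - Main part of device initialization.
 *
 * @dev_priv: Pointer to a struct vmw_private.
 * @pci_id: PCI device id.
 *
 * Maps the PCI resources, detects the device version and capabilities,
 * and sets up the memory managers, KMS and the command submission
 * machinery. Returns 0 on success or a negative error code on failure.
 */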
static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
{
	int ret;
	enum vmw_res_type i;
	bool refuse_dma = false;
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

	dev_priv->drm.dev_private = dev_priv;

	vmw_sw_context_init(dev_priv);

	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->binding_mutex);
	spin_lock_init(&dev_priv->resource_lock);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	ret = vmw_setup_pci_resources(dev_priv, pci_id);
	if (ret)
		return ret;
	ret = vmw_detect_version(dev_priv);
	if (ret)
		goto out_no_pci_or_version;


	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init_base(&dev_priv->res_idr[i], 1);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	vmw_print_bitmap(&dev_priv->drm, "Capabilities",
			 dev_priv->capabilities,
			 cap1_names, ARRAY_SIZE(cap1_names));
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
		vmw_print_bitmap(&dev_priv->drm, "Capabilities2",
				 dev_priv->capabilities2,
				 cap2_names, ARRAY_SIZE(cap2_names));
	}

	if (!vmwgfx_supported(dev_priv)) {
		vmw_disable_backdoor();
		drm_err_once(&dev_priv->drm,
			     "vmwgfx seems to be running on an unsupported hypervisor.");
		drm_err_once(&dev_priv->drm,
			     "This configuration is likely broken.");
		drm_err_once(&dev_priv->drm,
			     "Please switch to a supported graphics device to avoid problems.");
	}

	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		drm_info(&dev_priv->drm,
			 "Restricting capabilities since DMA not available.\n");
		refuse_dma = true;
		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			drm_info(&dev_priv->drm,
				 "Disabling 3D acceleration.\n");
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size;

		if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
			mem_size = vmw_read(dev_priv,
					    SVGA_REG_GBOBJECT_MEM_SIZE_KB);
		else
			mem_size =
				vmw_read(dev_priv,
					 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->max_primary_mem =
			vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->max_primary_mem = dev_priv->vram_size;
	}
	drm_info(&dev_priv->drm,
		 "Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n",
		 (u64)dev_priv->vram_size / 1024,
		 (u64)dev_priv->fifo_mem_size / 1024,
		 dev_priv->memory_size / 1024);

	drm_info(&dev_priv->drm,
		 "MOB limits: max mob size = %u kB, max mob pages = %u\n",
		 dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		drm_info(&dev_priv->drm,
			 "Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		drm_info(&dev_priv->drm,
			 "Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
	}
	drm_info(&dev_priv->drm,
		 "Maximum display memory size is %llu kiB\n",
		 (uint64_t)dev_priv->max_primary_mem / 1024);

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err0;
	}

	dev_priv->tdev = ttm_object_device_init(&vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		drm_err(&dev_priv->drm,
			"Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err0;
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(dev_priv);
		if (ret != 0) {
			drm_err(&dev_priv->drm,
				"Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
			      dev_priv->drm.dev,
			      dev_priv->drm.anon_inode->i_mapping,
			      dev_priv->drm.vma_offset_manager,
			      dev_priv->map_mode == vmw_dma_alloc_coherent,
			      false);
	if (unlikely(ret != 0)) {
		drm_err(&dev_priv->drm,
			"Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */

	ret = vmw_vram_manager_init(dev_priv);
	if (unlikely(ret != 0)) {
		drm_err(&dev_priv->drm,
			"Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}

	ret = vmw_devcaps_create(dev_priv);
	if (unlikely(ret != 0)) {
		drm_err(&dev_priv->drm,
			"Failed initializing device caps.\n");
		goto out_no_vram;
	}

	/*
	 * "Guest Memory Regions" is an aperture like feature with
	 *  one slot per bo. There is an upper limit of the number of
	 *  slots as well as the bo size.
	 */
	dev_priv->has_gmr = true;
	/* TODO: This is most likely not correct */
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma ||
	    vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
		drm_info(&dev_priv->drm,
			 "No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
		dev_priv->has_mob = true;

		if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
			drm_info(&dev_priv->drm,
				 "No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
		if (vmw_sys_man_init(dev_priv) != 0) {
			drm_info(&dev_priv->drm,
				 "No MOB page table memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
		if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_DXCONTEXT))
			dev_priv->sm_type = VMW_SM_4;
	}

	/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
	if (has_sm4_context(dev_priv) &&
	    (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
		if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM41))
			dev_priv->sm_type = VMW_SM_4_1;
		if (has_sm4_1_context(dev_priv) &&
				(dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
			if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) {
				dev_priv->sm_type = VMW_SM_5;
				if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_GL43))
					dev_priv->sm_type = VMW_SM_5_1X;
			}
		}
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	vmw_print_sm_type(dev_priv);
	vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)",
			VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
			VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);
	vmw_write_driver_id(dev_priv);

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob) {
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
		vmw_sys_man_fini(dev_priv);
	}
	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
	vmw_devcaps_destroy(dev_priv);
	vmw_vram_manager_fini(dev_priv);
out_no_vram:
	ttm_device_fini(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);
out_no_irq:
	ttm_object_device_release(&dev_priv->tdev);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
out_no_pci_or_version:
	pci_release_regions(pdev);
	return ret;
}

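/**
 * vmw_driver_unload - Tear down everything set up by vmw_driver_load().
 *
 * @dev: Pointer to the drm device.
 */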
static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	vmw_sw_context_fini(dev_priv);
	vmw_fifo_resource_dec(dev_priv);

	vmw_svga_disable(dev_priv);

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob) {
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
		vmw_sys_man_fini(dev_priv);
	}
	vmw_devcaps_destroy(dev_priv);
	vmw_vram_manager_fini(dev_priv);
	ttm_device_fini(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);

	ttm_object_device_release(&dev_priv->tdev);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	vmw_mksstat_remove_all(dev_priv);

	pci_release_regions(pdev);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

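/**
 * vmw_generic_ioctl - Common dispatch for the native and compat ioctl paths.
 *
 * @filp: Pointer to the file the ioctl arrived on.
 * @cmd: The encoded ioctl command.
 * @arg: The ioctl argument.
 * @ioctl_func: drm_ioctl() or drm_compat_ioctl().
 *
 * Applies extra permission and encoding checks to driver-private ioctls
 * before handing off to the generic drm dispatcher.
 */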
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	unsigned int flags;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			return ioctl_func(filp, cmd, arg);
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EACCES;
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	return ioctl_func(filp, cmd, arg);

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_master_set(struct drm_device *dev,
			   struct drm_file *file_priv,
			   bool from_open)
{
	/*
	 * Inform a new master that the layout may have changed while
	 * it was gone.
	 */
	if (!from_open)
		drm_sysfs_hotplug_event(dev);
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_kms_legacy_hotspot_clear(dev_priv);
}

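/**
 * vmwgfx_supported - Check whether the driver runs on a supported platform.
 *
 * @vmw: Pointer to a struct vmw_private.
 *
 * Returns true when running under a VMware hypervisor on x86, or on an
 * SVGA3 device on arm64; false otherwise.
 */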
bool vmwgfx_supported(struct vmw_private *vmw)
{
#if defined(CONFIG_X86)
	return hypervisor_is_type(X86_HYPER_VMWARE);
#elif defined(CONFIG_ARM64)
	/*
	 * On aarch64 only svga3 is supported
	 */
	return vmw->pci_id == VMWGFX_PCI_ID_SVGA3;
#else
	drm_warn_once(&vmw->drm,
		      "vmwgfx is running on an unknown architecture.");
	return false;
#endif
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	if (!ttm_resource_manager_used(man)) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
		ttm_resource_manager_set_used(man, true);
	}
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	__vmw_svga_enable(dev_priv);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	if (ttm_resource_manager_used(man)) {
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
}

/**
 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 *
	 */
	vmw_kms_lost_device(&dev_priv->drm);
	if (ttm_resource_manager_used(man)) {
		if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_dev_unregister(dev);
	vmw_driver_unload(dev);
}

static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
{
	struct drm_minor *minor = vmw->drm.primary;
	struct dentry *root = minor->debugfs_root;

	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_SYSTEM),
					    root, "system_ttm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM),
					    root, "vram_ttm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
					    root, "gmr_ttm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
					    root, "mob_ttm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
					    root, "system_mob_ttm");
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked)) {
			dev_priv->suspend_locked = false;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * No user-space processes should be running now.
	 */
	ret = vmw_kms_suspend(&dev_priv->drm);
	if (ret) {
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0);
	vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_detect_version(dev_priv);

	vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	if (dev_priv->suspend_state)
		vmw_kms_resume(&dev_priv->drm);

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static const struct drm_driver driver = {
	.driver_features =
	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct vmw_private *vmw;
	int ret;

	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
	if (ret)
		goto out_error;

	ret = pcim_enable_device(pdev);
	if (ret)
		goto out_error;

	vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
				 struct vmw_private, drm);
	if (IS_ERR(vmw)) {
		ret = PTR_ERR(vmw);
		goto out_error;
	}

	pci_set_drvdata(pdev, &vmw->drm);

	ret = vmw_driver_load(vmw, ent->device);
	if (ret)
		goto out_error;

	ret = drm_dev_register(&vmw->drm, 0);
	if (ret)
		goto out_unload;

	vmw_fifo_resource_inc(vmw);
	vmw_svga_enable(vmw);
	drm_fbdev_generic_setup(&vmw->drm, 0);

	vmw_debugfs_gem_init(vmw);
	vmw_debugfs_resource_managers_init(vmw);

	return 0;
out_unload:
	vmw_driver_unload(&vmw->drm);
out_error:
	return ret;
}

drm_module_pci_driver(vmw_pci_driver);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");