// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_vblank.h>

#include "vmwgfx_kms.h"

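/**
 * vmw_du_cleanup - Tear down the DRM objects owned by a display unit
 *
 * @du: display unit to clean up
 *
 * Releases the primary plane, the cursor plane (which only exists when
 * command buffers are supported), and the connector, CRTC and encoder
 * belonging to @du.
 */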
void vmw_du_cleanup(struct vmw_display_unit *du)
{
	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
	drm_plane_cleanup(&du->primary);
	if (vmw_cmd_supported(dev_priv))
		drm_plane_cleanup(&du->cursor.base);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct ttm_buffer_object *bo,
				  struct ttm_bo_kmap_obj *map,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY);

struct vmw_svga_fifo_cmd_define_cursor {
	u32 cmd;
	SVGAFifoCmdDefineAlphaCursor cursor;
};

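/**
 * vmw_cursor_update_image - Define the cursor image on the device
 *
 * @dev_priv: device to work with
 * @cm_bo: cursor MOB to use, or NULL to use the FIFO instead
 * @cm_map: kmap obj for @cm_bo
 * @image: cursor source data
 * @width: source data width
 * @height: source data height
 * @hotspotX: cursor hotspot x
 * @hotspotY: cursor hotspot y
 *
 * Routes the update through the cursor MOB when one is available and
 * falls back to an SVGA_CMD_DEFINE_ALPHA_CURSOR FIFO command otherwise.
 */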
static void vmw_cursor_update_image(struct vmw_private *dev_priv,
				    struct ttm_buffer_object *cm_bo,
				    struct ttm_bo_kmap_obj *cm_map,
				    u32 *image, u32 width, u32 height,
				    u32 hotspotX, u32 hotspotY)
{
	struct vmw_svga_fifo_cmd_define_cursor *cmd;
	const u32 image_size = width * height * sizeof(*image);
	const u32 cmd_size = sizeof(*cmd) + image_size;

	if (cm_bo != NULL) {
		vmw_cursor_update_mob(dev_priv, cm_bo, cm_map, image,
				      width, height,
				      hotspotX, hotspotY);
		return;
	}

	/*
	 * Try to reserve fifocmd space and swallow any failures;
	 * such reservations cannot be left unconsumed for long
	 * under the risk of clogging other fifocmd users, so we
	 * treat reservations separately from the way we treat other
	 * fallible KMS-atomic resources at prepare_fb.
	 */
	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);

	if (unlikely(cmd == NULL))
		return;

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_cmd_commit_flush(dev_priv, cmd_size);
}

/**
 * vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
 *
 * @dev_priv: device to work with
 * @bo: BO for the MOB
 * @map: kmap obj for the BO
 * @image: cursor source data to fill the MOB with
 * @width: source data width
 * @height: source data height
 * @hotspotX: cursor hotspot x
 * @hotspotY: cursor hotspot y
 */
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct ttm_buffer_object *bo,
				  struct ttm_bo_kmap_obj *map,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY)
{
	SVGAGBCursorHeader *header;
	SVGAGBAlphaCursorHeader *alpha_header;
	const u32 image_size = width * height * sizeof(*image);
	bool dummy;

	BUG_ON(!image);

	header = (SVGAGBCursorHeader *)ttm_kmap_obj_virtual(map, &dummy);
	alpha_header = &header->header.alphaHeader;

	header->type = SVGA_ALPHA_CURSOR;
	header->sizeInBytes = image_size;

	alpha_header->hotspotX = hotspotX;
	alpha_header->hotspotY = hotspotY;
	alpha_header->width = width;
	alpha_header->height = height;

	memcpy(header + 1, image, image_size);

	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID, bo->resource->start);
}

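/**
 * vmw_du_destroy_cursor_mob_array - Free all cursor MOBs of a cursor plane
 *
 * @vcp: cursor plane whose MOB array should be torn down
 *
 * Unpins and releases every buffer object in @vcp->cursor_mob and
 * clears the array entries.
 */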
void vmw_du_destroy_cursor_mob_array(struct vmw_cursor_plane *vcp)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mob); i++) {
		if (vcp->cursor_mob[i] != NULL) {
			ttm_bo_unpin(vcp->cursor_mob[i]);
			ttm_bo_put(vcp->cursor_mob[i]);
			kfree(vcp->cursor_mob[i]);
			vcp->cursor_mob[i] = NULL;
		}
	}
}

#define CURSOR_MOB_SIZE(dimension) \
	((dimension) * (dimension) * sizeof(u32) + sizeof(SVGAGBCursorHeader))

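/**
 * vmw_du_create_cursor_mob_array - Allocate the cursor MOBs for a plane
 *
 * @cursor: cursor plane to allocate MOBs for
 *
 * Allocates one MOB-backed buffer object per cursor_mob slot, sized for
 * the maximum cursor dimension the device reports (falling back to the
 * mandatory 64x64 size when that would exceed the maximum MOB size), and
 * fences each MOB so it is known to the device before first use.
 *
 * Returns 0 on success, -ENOSYS when cursor MOBs are unsupported, or a
 * negative error code on allocation failure.
 */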
int vmw_du_create_cursor_mob_array(struct vmw_cursor_plane *cursor)
{
	struct vmw_private *dev_priv = cursor->base.dev->dev_private;
	uint32_t cursor_max_dim, mob_max_size;
	int ret = 0;
	size_t i;

	if (!dev_priv->has_mob || (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
		return -ENOSYS;

	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);

	if (CURSOR_MOB_SIZE(cursor_max_dim) > mob_max_size)
		cursor_max_dim = 64; /* Mandatorily-supported cursor dimension */

	for (i = 0; i < ARRAY_SIZE(cursor->cursor_mob); i++) {
		struct ttm_buffer_object **const bo = &cursor->cursor_mob[i];

		ret = vmw_bo_create_kernel(dev_priv,
			CURSOR_MOB_SIZE(cursor_max_dim),
			&vmw_mob_placement, bo);

		if (ret != 0)
			goto teardown;

		if ((*bo)->resource->mem_type != VMW_PL_MOB) {
			DRM_ERROR("Obtained buffer object is not a MOB.\n");
			ret = -ENOSYS;
			goto teardown;
		}

		/* Fence the mob creation so we are guaranteed to have the mob */
		ret = ttm_bo_reserve(*bo, false, false, NULL);

		if (ret != 0)
			goto teardown;

		vmw_bo_fence_single(*bo, NULL);

		ttm_bo_unreserve(*bo);

		drm_info(&dev_priv->drm, "Using CursorMob mobid %lu, max dimension %u\n",
			 (*bo)->resource->start, cursor_max_dim);
	}

	return 0;

teardown:
	vmw_du_destroy_cursor_mob_array(cursor);

	return ret;
}

#undef CURSOR_MOB_SIZE

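/**
 * vmw_cursor_update_bo - Update the cursor image from a buffer object
 *
 * @dev_priv: device to work with
 * @cm_bo: cursor MOB to use, or NULL to use the FIFO instead
 * @cm_map: kmap obj for @cm_bo
 * @bo: mapped buffer object holding the cursor image
 * @width: cursor width
 * @height: cursor height
 * @hotspotX: cursor hotspot x
 * @hotspotY: cursor hotspot y
 *
 * Feeds the kmapped contents of @bo to vmw_cursor_update_image() and
 * drops the mapping count taken when the buffer was mapped at
 * prepare_fb time.
 */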
static void vmw_cursor_update_bo(struct vmw_private *dev_priv,
				 struct ttm_buffer_object *cm_bo,
				 struct ttm_bo_kmap_obj *cm_map,
				 struct vmw_buffer_object *bo,
				 u32 width, u32 height,
				 u32 hotspotX, u32 hotspotY)
{
	void *virtual;
	bool dummy;

	virtual = ttm_kmap_obj_virtual(&bo->map, &dummy);
	if (virtual) {
		vmw_cursor_update_image(dev_priv, cm_bo, cm_map, virtual,
					width, height,
					hotspotX, hotspotY);
		atomic_dec(&bo->base_mapped_count);
	}
}


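/**
 * vmw_cursor_update_position - Move and show/hide the device cursor
 *
 * @dev_priv: device to work with
 * @show: whether the cursor should be visible
 * @x: new cursor x position
 * @y: new cursor y position
 *
 * Picks the newest positioning interface the device offers: the CURSOR4
 * registers when SVGA_CAP2_EXTRA_REGS is present, the cursor bypass 3
 * FIFO registers when enabled, and the legacy cursor registers
 * otherwise.
 */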
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
					     : SVGA_CURSOR_ON_HIDE;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
	} else {
		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
	}
	spin_unlock(&dev_priv->cursor_lock);
}


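/**
 * vmw_kms_cursor_snoop - Snoop a surface DMA into the cursor snooper image
 *
 * @srf: surface carrying the snooper
 * @tfile: TTM file the command originated from
 * @bo: buffer object that is the source of the DMA
 * @header: header of the SVGA3D surface DMA command
 *
 * Copies the 64x64 cursor image being DMA'd to @srf into the surface's
 * snooper image so later cursor updates can replay it. Only simple,
 * page-aligned, single-box copies are handled; anything else is
 * rejected with an error message.
 */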
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

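/**
 * vmw_kms_cursor_post_execbuf - Refresh snooped cursors after command
 * submission
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Walks all CRTCs and, for every display unit whose snooped cursor image
 * has aged since the last update, re-sends the 64x64 snooper image to
 * the device with the current hotspot.
 */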
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, NULL, NULL,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}


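/**
 * vmw_du_cursor_plane_destroy - Hide the cursor and destroy the plane
 *
 * @plane: cursor plane to destroy
 *
 * Hides the device cursor, frees the plane's cursor MOB array and
 * cleans up the DRM plane.
 */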
void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
	vmw_du_destroy_cursor_mob_array(vmw_plane_to_vcp(plane));
	drm_plane_cleanup(plane);
}


void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free them */
}


/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the display surface.
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}


/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane:  display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}


/**
 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: cursor plane
 * @old_state: contains the state to clean up
 *
 * Unmaps all cursor bo mappings and unpins the cursor surface.
 */
void
vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
			       struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
	bool dummy;

	if (vps->bo != NULL && ttm_kmap_obj_virtual(&vps->bo->map, &dummy) != NULL) {
		const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);

		if (likely(ret == 0)) {
			if (atomic_read(&vps->bo->base_mapped_count) == 0)
				ttm_bo_kunmap(&vps->bo->map);
			ttm_bo_unreserve(&vps->bo->base);
		}
	}

	if (vps->cm_bo != NULL && ttm_kmap_obj_virtual(&vps->cm_map, &dummy) != NULL) {
		const int ret = ttm_bo_reserve(vps->cm_bo, true, false, NULL);

		if (likely(ret == 0)) {
			ttm_bo_kunmap(&vps->cm_map);
			ttm_bo_unreserve(vps->cm_bo);
		}
	}

	vmw_du_plane_unpin_surf(vps, false);

	if (vps->surf) {
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}
}

/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane:  display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct ttm_buffer_object *cm_bo = NULL;
	bool dummy;
	int ret = 0;

	if (vps->surf) {
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_bo_reference(vps->bo);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	vps->cm_bo = NULL;

	if (vps->surf == NULL && vps->bo != NULL) {
		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);

		/*
		 * Not using the vmw_bo_map_and_cache() helper here as we
		 * need to reserve the ttm_buffer_object first, which
		 * vmw_bo_map_and_cache() omits.
		 */
		ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);

		if (unlikely(ret != 0))
			return -ENOMEM;

		ret = ttm_bo_kmap(&vps->bo->base, 0, PFN_UP(size), &vps->bo->map);

		if (likely(ret == 0))
			atomic_inc(&vps->bo->base_mapped_count);

		ttm_bo_unreserve(&vps->bo->base);

		if (unlikely(ret != 0))
			return -ENOMEM;
	}

	if (vps->surf || vps->bo) {
		unsigned cursor_mob_idx = vps->cursor_mob_idx;

		/* Lazily set up cursor MOBs just once -- no reattempts. */
		if (cursor_mob_idx == 0 && vcp->cursor_mob[0] == NULL)
			if (vmw_du_create_cursor_mob_array(vcp) != 0)
				vps->cursor_mob_idx = cursor_mob_idx = -1U;

		if (cursor_mob_idx < ARRAY_SIZE(vcp->cursor_mob)) {
			const u32 size = sizeof(SVGAGBCursorHeader) +
				new_state->crtc_w * new_state->crtc_h * sizeof(u32);

			cm_bo = vcp->cursor_mob[cursor_mob_idx];

			if (cm_bo->resource->num_pages * PAGE_SIZE < size) {
				ret = -EINVAL;
				goto error_bo_unmap;
			}

			ret = ttm_bo_reserve(cm_bo, false, false, NULL);

			if (unlikely(ret != 0)) {
				ret = -ENOMEM;
				goto error_bo_unmap;
			}

			ret = ttm_bo_kmap(cm_bo, 0, PFN_UP(size), &vps->cm_map);

			/*
			 * We just want to try to get mob bind to finish
			 * so that the first write to SVGA_REG_CURSOR_MOBID
			 * is done with a buffer that the device has already
			 * seen.
			 */
			(void) ttm_bo_wait(cm_bo, false, false);

			ttm_bo_unreserve(cm_bo);

			if (unlikely(ret != 0)) {
				ret = -ENOMEM;
				goto error_bo_unmap;
			}

			vps->cursor_mob_idx = cursor_mob_idx ^ 1;
			vps->cm_bo = cm_bo;
		}
	}

	return 0;

error_bo_unmap:
	if (vps->bo != NULL && ttm_kmap_obj_virtual(&vps->bo->map, &dummy) != NULL) {
		const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
		if (likely(ret == 0)) {
			atomic_dec(&vps->bo->base_mapped_count);
			ttm_bo_kunmap(&vps->bo->map);
			ttm_bo_unreserve(&vps->bo->base);
		}
	}

	return ret;
}


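/**
 * vmw_du_cursor_plane_atomic_update - Push the new cursor state to the device
 *
 * @plane: cursor plane
 * @state: atomic state containing the new and old plane state
 *
 * Sends the new cursor image (from the snooped surface or the backing
 * buffer object) to the device, repositions or hides the cursor, and
 * records the hotspot that was applied.
 */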
void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	s32 hotspot_x, hotspot_y;

	hotspot_x = du->hotspot_x;
	hotspot_y = du->hotspot_y;

	if (new_state->fb) {
		hotspot_x += new_state->fb->hot_x;
		hotspot_y += new_state->fb->hot_y;
	}

	du->cursor_surface = vps->surf;
	du->cursor_bo = vps->bo;

	if (vps->surf) {
		du->cursor_age = du->cursor_surface->snooper.age;

		vmw_cursor_update_image(dev_priv, vps->cm_bo, &vps->cm_map,
					vps->surf->snooper.image,
					new_state->crtc_w,
					new_state->crtc_h,
					hotspot_x, hotspot_y);
	} else if (vps->bo) {
		vmw_cursor_update_bo(dev_priv, vps->cm_bo, &vps->cm_map,
				     vps->bo,
				     new_state->crtc_w,
				     new_state->crtc_h,
				     hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	du->cursor_x = new_state->crtc_x + du->set_gui_x;
	du->cursor_y = new_state->crtc_y + du->set_gui_y;

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + hotspot_x,
				   du->cursor_y + hotspot_y);

	du->core_hotspot_x = hotspot_x - du->hotspot_x;
	du->core_hotspot_y = hotspot_y - du->hotspot_y;
}


/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state.  Other
 * than what the atomic helper checks, we care about crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = new_state->fb;
	int ret;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  false, true);

	if (!ret && new_fb) {
		struct drm_crtc *crtc = new_state->crtc;
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		vmw_connector_state_to_vcs(du->connector.state);
	}

	return ret;
}


/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		return -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->bo)
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		return -EINVAL;
	}

	return 0;
}


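/**
 * vmw_du_crtc_atomic_check - check if the new crtc state is okay
 *
 * @crtc: DRM crtc
 * @state: atomic state containing the new crtc state
 *
 * Requires an active primary plane whenever the crtc is enabled, checks
 * that only the display unit's own connector is part of the state, and
 * substitutes the logical clock for the dot clock the virtual device
 * lacks.
 *
 * Returns 0 on success, -EINVAL otherwise.
 */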
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
									 crtc);
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/* We always want to have an active plane with an active CRTC */
	if (has_primary != new_state->enable)
		return -EINVAL;

	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}


void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
}


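/**
 * vmw_du_crtc_atomic_flush - Finish an atomic commit on the crtc
 *
 * @crtc: DRM crtc
 * @state: the driver state object
 *
 * Delivers the pending vblank event, if any, for the commit.
 */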
void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (event) {
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}


/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}


/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
}


/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}


/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);

	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->bo)
		(void) vmw_bo_reference(vps->bo);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}


/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);

	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}


/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	/* Should have been freed by cleanup_fb */
	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}


/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}


/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;

	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}


/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

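/**
 * vmw_framebuffer_surface_destroy - Tear down a surface-backed framebuffer
 *
 * @framebuffer: framebuffer to destroy
 *
 * Drops the framebuffer's reference on its surface and frees the
 * wrapping vmw_framebuffer_surface.
 */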
static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);

	kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}


static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_bo_proxy)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->metadata.scanout))
		return -EINVAL;

	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
		     surface->metadata.num_sizes != 1 ||
		     surface->metadata.base_size.width < mode_cmd->width ||
		     surface->metadata.base_size.height < mode_cmd->height ||
		     surface->metadata.base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_bo_proxy = is_bo_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Buffer-object framebuffer code
 */

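/**
 * vmw_framebuffer_bo_create_handle - Create a GEM handle for the backing bo
 *
 * @fb: framebuffer to create the handle for
 * @file_priv: DRM file the handle is created in
 * @handle: receives the new handle
 */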
static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
					    struct drm_file *file_priv,
					    unsigned int *handle)
{
	struct vmw_framebuffer_bo *vfbd =
			vmw_framebuffer_to_vfbd(fb);

	return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle);
}

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);

	kfree(vfbd);
}

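/**
 * vmw_framebuffer_bo_dirty - Flush dirty regions of a bo-backed framebuffer
 *
 * @framebuffer: framebuffer with dirty regions
 * @file_priv: DRM file of the caller
 * @flags: dirty fb ioctl flags
 * @color: unused
 * @clips: array of dirty clip rects, or NULL for the full framebuffer
 * @num_clips: number of clip rects in @clips
 *
 * Only meaningful for the legacy display unit; translates the clip list
 * into a bo dirty operation and flushes the command stream.
 */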
static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
				    struct drm_file *file_priv,
				    unsigned int flags, unsigned int color,
				    struct drm_clip_rect *clips,
				    unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(&dev_priv->drm);

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
					      clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_cmd_flush(dev_priv, false);

	drm_modeset_unlock_all(&dev_priv->drm);

	return ret;
}

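/**
 * vmw_framebuffer_bo_dirty_ext - Dirty-fb entry point for all display units
 *
 * @framebuffer: framebuffer with dirty regions
 * @file_priv: DRM file of the caller
 * @flags: dirty fb ioctl flags
 * @color: unused
 * @clips: array of dirty clip rects, or NULL for the full framebuffer
 * @num_clips: number of clip rects in @clips
 *
 * Dispatches to vmw_framebuffer_bo_dirty() for the legacy display unit
 * and to the atomic dirtyfb helper everywhere else.
 */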
static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
					struct drm_file *file_priv,
					unsigned int flags, unsigned int color,
					struct drm_clip_rect *clips,
					unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	if (dev_priv->active_display_unit == vmw_du_legacy &&
	    vmw_cmd_supported(dev_priv))
		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
						color, clips, num_clips);

	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
					 clips, num_clips);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.create_handle = vmw_framebuffer_bo_create_handle,
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = vmw_framebuffer_bo_dirty_ext,
};

/*
 * Pin the buffer in a location suitable for access by the
 * display system.
 */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;
	struct ttm_placement *placement;
	int ret;

	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->bo) {
			if (dev_priv->capabilities & SVGA_CAP_3D) {
				/*
				 * Use surface DMA to get content to
				 * screen target surface.
				 */
				placement = &vmw_vram_gmr_placement;
			} else {
				/* Use CPU blit. */
				placement = &vmw_sys_placement;
			}
		} else {
			/* Use surface / image update */
			placement = &vmw_mob_placement;
		}

		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}

static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;

	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_bo_unpin(dev_priv, buf, false);
}

/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to the
 * same buffer.  This way we can do a surface copy rather than a surface DMA.
 * This is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_bo_proxy(struct drm_device *dev,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct vmw_buffer_object *bo_mob,
			       struct vmw_surface **srf_out)
{
	struct vmw_surface_metadata metadata = {0};
	uint32_t format;
	struct vmw_resource *res;
	unsigned int bytes_pp;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case 8:
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	metadata.format = format;
	metadata.mip_levels[0] = 1;
	metadata.num_sizes = 1;
	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
	metadata.base_size.height = mode_cmd->height;
	metadata.base_size.depth = 1;
	metadata.scanout = true;

	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_bo_unreference(&res->backup);
	res->backup = vmw_bo_reference(bo_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}


static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_buffer_object *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_mode_fb_cmd2
				      *mode_cmd)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->base.base.size)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	vfbd->base.base.obj[0] = &bo->base.base;
	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}


/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surfaces need to be no larger than the maximum texture size.
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width  > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @only_2d: No presents will occur to this buffer object based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_buffer_object *bo,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_bo_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the buffer object in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
	    bo && only_2d &&
	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
					  bo, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_bo_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_bo_proxy);
		/*
		 * vmw_create_bo_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_bo_proxy)
			vmw_surface_unreference(&surface);
	} else if (bo) {
		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
						 mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}

/*
 * Generic Kernel modesetting functions
 */

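/**
 * vmw_kms_fb_create - Entry point for the KMS framebuffer create ioctl
 *
 * @dev: DRM device
 * @file_priv: DRM file of the caller
 * @mode_cmd: user-space framebuffer metadata
 *
 * Looks up the buffer object or surface behind the user handle, checks
 * it against the device limits, and wraps it in a vmw_framebuffer.
 *
 * Returns a pointer to the new framebuffer, or an ERR_PTR on failure.
 */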
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_buffer_object *bo = NULL;
	int ret;

	/* returns either a bo or surface */
	ret = vmw_user_lookup_handle(dev_priv, file_priv,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret) {
		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
			  mode_cmd->handles[0], mode_cmd->handles[0]);
		goto err_out;
	}

	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			dev_priv->texture_max_width,
			dev_priv->texture_max_height);
		goto err_out;
	}

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_bo_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}

	return &vfb->base;
}

1684 /**
1685  * vmw_kms_check_display_memory - Validates display memory required for a
1686  * topology
1687  * @dev: DRM device
1688  * @num_rects: number of drm_rect in rects
1689  * @rects: array of drm_rect representing the topology to validate indexed by
1690  * crtc index.
1691  *
1692  * Returns:
1693  * 0 on success otherwise negative error code
1694  */
vmw_kms_check_display_memory(struct drm_device * dev,uint32_t num_rects,struct drm_rect * rects)1695 static int vmw_kms_check_display_memory(struct drm_device *dev,
1696 					uint32_t num_rects,
1697 					struct drm_rect *rects)
1698 {
1699 	struct vmw_private *dev_priv = vmw_priv(dev);
1700 	struct drm_rect bounding_box = {0};
1701 	u64 total_pixels = 0, pixel_mem, bb_mem;
1702 	int i;
1703 
1704 	for (i = 0; i < num_rects; i++) {
1705 		/*
1706 		 * For STDU only individual screen (screen target) is limited by
1707 		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1708 		 */
1709 		if (dev_priv->active_display_unit == vmw_du_screen_target &&
1710 		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1711 		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1712 			VMW_DEBUG_KMS("Screen size not supported.\n");
1713 			return -EINVAL;
1714 		}
1715 
1716 		/* Bounding box upper left is at (0,0). */
1717 		if (rects[i].x2 > bounding_box.x2)
1718 			bounding_box.x2 = rects[i].x2;
1719 
1720 		if (rects[i].y2 > bounding_box.y2)
1721 			bounding_box.y2 = rects[i].y2;
1722 
1723 		total_pixels += (u64) drm_rect_width(&rects[i]) *
1724 			(u64) drm_rect_height(&rects[i]);
1725 	}
1726 
1727 	/* Virtual svga device primary limits are always in 32-bpp. */
1728 	pixel_mem = total_pixels * 4;
1729 
1730 	/*
1731 	 * For HV10 and below prim_bb_mem is vram size. When
1732 	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present vram size is
1733 	 * limit on primary bounding box
1734 	 */
1735 	if (pixel_mem > dev_priv->max_primary_mem) {
1736 		VMW_DEBUG_KMS("Combined output size too large.\n");
1737 		return -EINVAL;
1738 	}
1739 
1740 	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1741 	if (dev_priv->active_display_unit != vmw_du_screen_target ||
1742 	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1743 		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1744 
1745 		if (bb_mem > dev_priv->max_primary_mem) {
1746 			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1747 			return -EINVAL;
1748 		}
1749 	}
1750 
1751 	return 0;
1752 }
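
/*
 * Worked example of the check above (illustrative numbers only): three
 * enabled 1920x1080 outputs placed side by side give
 *
 *	total_pixels = 3 * 1920 * 1080	= 6220800
 *	pixel_mem    = total_pixels * 4	= 24883200 bytes (~23.7 MiB)
 *
 * and a bounding box of 5760x1080, so
 *
 *	bb_mem = 5760 * 1080 * 4	= 24883200 bytes
 *
 * Both values must fit in dev_priv->max_primary_mem, except that bb_mem
 * is not checked for STDU devices advertising SVGA_CAP_NO_BB_RESTRICTION.
 */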
1753 
1754 /**
1755  * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1756  * crtc mutex
1757  * @state: The atomic state pointer containing the new atomic state
1758  * @crtc: The crtc
1759  *
1760  * This function returns the new crtc state if it's part of the state update.
1761  * Otherwise returns the current crtc state. It also makes sure that the
1762  * crtc mutex is locked.
1763  *
1764  * Returns: A valid crtc state pointer or NULL. It may also return an
1765  * error pointer, in particular -EDEADLK if locking needs to be rerun.
1766  */
1767 static struct drm_crtc_state *
1768 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1769 {
1770 	struct drm_crtc_state *crtc_state;
1771 
1772 	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1773 	if (crtc_state) {
1774 		lockdep_assert_held(&crtc->mutex.mutex.base);
1775 	} else {
1776 		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1777 
1778 		if (ret != 0 && ret != -EALREADY)
1779 			return ERR_PTR(ret);
1780 
1781 		crtc_state = crtc->state;
1782 	}
1783 
1784 	return crtc_state;
1785 }
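
/*
 * Typical use of the helper above in an atomic check path (a minimal
 * sketch of the pattern used by the callers below, not a verbatim
 * caller). The -EDEADLK error pointer must be propagated so that the
 * atomic core can back off and rerun the locking sequence:
 *
 *	crtc_state = vmw_crtc_state_and_lock(state, crtc);
 *	if (IS_ERR(crtc_state))
 *		return PTR_ERR(crtc_state);	// may be -EDEADLK
 *	if (!crtc_state)
 *		continue;			// no current state; skip
 */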
1786 
1787 /**
1788  * vmw_kms_check_implicit - Verify that all implicit display units scan out
1789  * from the same fb after the new state is committed.
1790  * @dev: The drm_device.
1791  * @state: The new state to be checked.
1792  *
1793  * Returns:
1794  *   Zero on success,
1795  *   -EINVAL on invalid state,
1796  *   -EDEADLK if modeset locking needs to be rerun.
1797  */
1798 static int vmw_kms_check_implicit(struct drm_device *dev,
1799 				  struct drm_atomic_state *state)
1800 {
1801 	struct drm_framebuffer *implicit_fb = NULL;
1802 	struct drm_crtc *crtc;
1803 	struct drm_crtc_state *crtc_state;
1804 	struct drm_plane_state *plane_state;
1805 
1806 	drm_for_each_crtc(crtc, dev) {
1807 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1808 
1809 		if (!du->is_implicit)
1810 			continue;
1811 
1812 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1813 		if (IS_ERR(crtc_state))
1814 			return PTR_ERR(crtc_state);
1815 
1816 		if (!crtc_state || !crtc_state->enable)
1817 			continue;
1818 
1819 		/*
1820 		 * Can't move primary planes across crtcs, so this is OK.
1821 		 * It also means we don't need to take the plane mutex.
1822 		 */
1823 		plane_state = du->primary.state;
1824 		if (plane_state->crtc != crtc)
1825 			continue;
1826 
1827 		if (!implicit_fb)
1828 			implicit_fb = plane_state->fb;
1829 		else if (implicit_fb != plane_state->fb)
1830 			return -EINVAL;
1831 	}
1832 
1833 	return 0;
1834 }
1835 
1836 /**
1837  * vmw_kms_check_topology - Validates topology in drm_atomic_state
1838  * @dev: DRM device
1839  * @state: the driver state object
1840  *
1841  * Returns:
1842  * 0 on success, negative error code otherwise
1843  */
1844 static int vmw_kms_check_topology(struct drm_device *dev,
1845 				  struct drm_atomic_state *state)
1846 {
1847 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1848 	struct drm_rect *rects;
1849 	struct drm_crtc *crtc;
1850 	uint32_t i;
1851 	int ret = 0;
1852 
1853 	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1854 			GFP_KERNEL);
1855 	if (!rects)
1856 		return -ENOMEM;
1857 
1858 	drm_for_each_crtc(crtc, dev) {
1859 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1860 		struct drm_crtc_state *crtc_state;
1861 
1862 		i = drm_crtc_index(crtc);
1863 
1864 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1865 		if (IS_ERR(crtc_state)) {
1866 			ret = PTR_ERR(crtc_state);
1867 			goto clean;
1868 		}
1869 
1870 		if (!crtc_state)
1871 			continue;
1872 
1873 		if (crtc_state->enable) {
1874 			rects[i].x1 = du->gui_x;
1875 			rects[i].y1 = du->gui_y;
1876 			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1877 			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1878 		} else {
1879 			rects[i].x1 = 0;
1880 			rects[i].y1 = 0;
1881 			rects[i].x2 = 0;
1882 			rects[i].y2 = 0;
1883 		}
1884 	}
1885 
1886 	/* Determine change to topology due to new atomic state */
1887 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1888 				      new_crtc_state, i) {
1889 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1890 		struct drm_connector *connector;
1891 		struct drm_connector_state *conn_state;
1892 		struct vmw_connector_state *vmw_conn_state;
1893 
1894 		if (!du->pref_active && new_crtc_state->enable) {
1895 			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1896 			ret = -EINVAL;
1897 			goto clean;
1898 		}
1899 
1900 		/*
1901 		 * For vmwgfx each crtc has only one connector attached and it
1902 		 * is not changed, so we don't really need to check
1903 		 * crtc->connector_mask and iterate over it.
1904 		 */
1905 		connector = &du->connector;
1906 		conn_state = drm_atomic_get_connector_state(state, connector);
1907 		if (IS_ERR(conn_state)) {
1908 			ret = PTR_ERR(conn_state);
1909 			goto clean;
1910 		}
1911 
1912 		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1913 		vmw_conn_state->gui_x = du->gui_x;
1914 		vmw_conn_state->gui_y = du->gui_y;
1915 	}
1916 
1917 	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1918 					   rects);
1919 
1920 clean:
1921 	kfree(rects);
1922 	return ret;
1923 }
1924 
1925 /**
1926  * vmw_kms_atomic_check_modeset - validate state object for modeset changes
1927  *
1928  * @dev: DRM device
1929  * @state: the driver state object
1930  *
1931  * This is a simple wrapper around drm_atomic_helper_check() that also
1932  * validates the vmwgfx-specific constraints: implicit placement and,
1933  * when a modeset is required, the resulting display topology.
1934  *
1935  * Returns:
1936  * Zero for success or -errno
1937  */
1938 static int
1939 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1940 			     struct drm_atomic_state *state)
1941 {
1942 	struct drm_crtc *crtc;
1943 	struct drm_crtc_state *crtc_state;
1944 	bool need_modeset = false;
1945 	int i, ret;
1946 
1947 	ret = drm_atomic_helper_check(dev, state);
1948 	if (ret)
1949 		return ret;
1950 
1951 	ret = vmw_kms_check_implicit(dev, state);
1952 	if (ret) {
1953 		VMW_DEBUG_KMS("Invalid implicit state\n");
1954 		return ret;
1955 	}
1956 
1957 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1958 		if (drm_atomic_crtc_needs_modeset(crtc_state))
1959 			need_modeset = true;
1960 	}
1961 
1962 	if (need_modeset)
1963 		return vmw_kms_check_topology(dev, state);
1964 
1965 	return ret;
1966 }
1967 
1968 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1969 	.fb_create = vmw_kms_fb_create,
1970 	.atomic_check = vmw_kms_atomic_check_modeset,
1971 	.atomic_commit = drm_atomic_helper_commit,
1972 };
1973 
1974 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1975 				   struct drm_file *file_priv,
1976 				   struct vmw_framebuffer *vfb,
1977 				   struct vmw_surface *surface,
1978 				   uint32_t sid,
1979 				   int32_t destX, int32_t destY,
1980 				   struct drm_vmw_rect *clips,
1981 				   uint32_t num_clips)
1982 {
1983 	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1984 					    &surface->res, destX, destY,
1985 					    num_clips, 1, NULL, NULL);
1986 }
1987 
1988 
1989 int vmw_kms_present(struct vmw_private *dev_priv,
1990 		    struct drm_file *file_priv,
1991 		    struct vmw_framebuffer *vfb,
1992 		    struct vmw_surface *surface,
1993 		    uint32_t sid,
1994 		    int32_t destX, int32_t destY,
1995 		    struct drm_vmw_rect *clips,
1996 		    uint32_t num_clips)
1997 {
1998 	int ret;
1999 
2000 	switch (dev_priv->active_display_unit) {
2001 	case vmw_du_screen_target:
2002 		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
2003 						 &surface->res, destX, destY,
2004 						 num_clips, 1, NULL, NULL);
2005 		break;
2006 	case vmw_du_screen_object:
2007 		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2008 					      sid, destX, destY, clips,
2009 					      num_clips);
2010 		break;
2011 	default:
2012 		WARN_ONCE(true,
2013 			  "Present called with invalid display system.\n");
2014 		ret = -ENOSYS;
2015 		break;
2016 	}
2017 	if (ret)
2018 		return ret;
2019 
2020 	vmw_cmd_flush(dev_priv, false);
2021 
2022 	return 0;
2023 }
2024 
2025 static void
2026 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2027 {
2028 	if (dev_priv->hotplug_mode_update_property)
2029 		return;
2030 
2031 	dev_priv->hotplug_mode_update_property =
2032 		drm_property_create_range(&dev_priv->drm,
2033 					  DRM_MODE_PROP_IMMUTABLE,
2034 					  "hotplug_mode_update", 0, 1);
2035 }
2036 
2037 int vmw_kms_init(struct vmw_private *dev_priv)
2038 {
2039 	struct drm_device *dev = &dev_priv->drm;
2040 	int ret;
2041 	static const char *display_unit_names[] = {
2042 		"Invalid",
2043 		"Legacy",
2044 		"Screen Object",
2045 		"Screen Target",
2046 		"Invalid (max)"
2047 	};
2048 
2049 	drm_mode_config_init(dev);
2050 	dev->mode_config.funcs = &vmw_kms_funcs;
2051 	dev->mode_config.min_width = 1;
2052 	dev->mode_config.min_height = 1;
2053 	dev->mode_config.max_width = dev_priv->texture_max_width;
2054 	dev->mode_config.max_height = dev_priv->texture_max_height;
2055 
2056 	drm_mode_create_suggested_offset_properties(dev);
2057 	vmw_kms_create_hotplug_mode_update_property(dev_priv);
2058 
2059 	ret = vmw_kms_stdu_init_display(dev_priv);
2060 	if (ret) {
2061 		ret = vmw_kms_sou_init_display(dev_priv);
2062 		if (ret) /* Fallback */
2063 			ret = vmw_kms_ldu_init_display(dev_priv);
2064 	}
2065 	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2066 	drm_info(&dev_priv->drm, "%s display unit initialized\n",
2067 		 display_unit_names[dev_priv->active_display_unit]);
2068 
2069 	return ret;
2070 }
2071 
2072 int vmw_kms_close(struct vmw_private *dev_priv)
2073 {
2074 	int ret = 0;
2075 
2076 	/*
2077 	 * Docs say we should take the lock before calling this function,
2078 	 * but since it destroys encoders and our destructor calls
2079 	 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
2080 	 */
2081 	drm_mode_config_cleanup(&dev_priv->drm);
2082 	if (dev_priv->active_display_unit == vmw_du_legacy)
2083 		ret = vmw_kms_ldu_close_display(dev_priv);
2084 
2085 	return ret;
2086 }
2087 
2088 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2089 				struct drm_file *file_priv)
2090 {
2091 	struct drm_vmw_cursor_bypass_arg *arg = data;
2092 	struct vmw_display_unit *du;
2093 	struct drm_crtc *crtc;
2094 	int ret = 0;
2095 
2096 
2097 	mutex_lock(&dev->mode_config.mutex);
2098 	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2099 
2100 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2101 			du = vmw_crtc_to_du(crtc);
2102 			du->hotspot_x = arg->xhot;
2103 			du->hotspot_y = arg->yhot;
2104 		}
2105 
2106 		mutex_unlock(&dev->mode_config.mutex);
2107 		return 0;
2108 	}
2109 
2110 	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2111 	if (!crtc) {
2112 		ret = -ENOENT;
2113 		goto out;
2114 	}
2115 
2116 	du = vmw_crtc_to_du(crtc);
2117 
2118 	du->hotspot_x = arg->xhot;
2119 	du->hotspot_y = arg->yhot;
2120 
2121 out:
2122 	mutex_unlock(&dev->mode_config.mutex);
2123 
2124 	return ret;
2125 }
2126 
2127 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2128 			unsigned width, unsigned height, unsigned pitch,
2129 			unsigned bpp, unsigned depth)
2130 {
2131 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2132 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2133 	else if (vmw_fifo_have_pitchlock(vmw_priv))
2134 		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2135 	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2136 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2137 	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2138 		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2139 
2140 	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2141 		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2142 			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2143 		return -EINVAL;
2144 	}
2145 
2146 	return 0;
2147 }
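
/*
 * Example of programming the legacy registers above (illustrative
 * numbers only, assuming the host reports a matching depth): a
 * 1280x1024, 32-bpp, depth-24 mode would be written as
 *
 *	pitch  = 1280 * 32 / 8 = 5120 bytes
 *	width  = 1280, height = 1024, bpp = 32
 *
 * after which SVGA_REG_DEPTH is read back and must equal 24.
 */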
2148 
2149 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2150 				uint32_t pitch,
2151 				uint32_t height)
2152 {
2153 	return ((u64) pitch * (u64) height) < (u64)
2154 		((dev_priv->active_display_unit == vmw_du_screen_target) ?
2155 		 dev_priv->max_primary_mem : dev_priv->vram_size);
2156 }
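
/*
 * Example (illustrative numbers only): a 1920x1080 mode at an assumed
 * 4 bytes per pixel has pitch = 1920 * 4 = 7680 bytes, so the check is
 * 7680 * 1080 = 8294400 bytes against max_primary_mem for screen
 * targets, or against vram_size for the other display unit types.
 */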
2157 
2158 
2159 /*
2160  * Function called by DRM code with vbl_lock held.
2161  */
2162 u32 vmw_get_vblank_counter(struct drm_crtc *crtc)
2163 {
2164 	return 0;
2165 }
2166 
2167 /*
2168  * Function called by DRM code with vbl_lock held.
2169  */
2170 int vmw_enable_vblank(struct drm_crtc *crtc)
2171 {
2172 	return -EINVAL;
2173 }
2174 
2175 /*
2176  * Function called by DRM code with vbl_lock held.
2177  */
2178 void vmw_disable_vblank(struct drm_crtc *crtc)
2179 {
2180 }
2181 
2182 /**
2183  * vmw_du_update_layout - Update the display unit with topology from resolution
2184  * plugin and generate DRM uevent
2185  * @dev_priv: device private
2186  * @num_rects: number of drm_rect in rects
2187  * @rects: topology to update
2188  */
2189 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2190 				unsigned int num_rects, struct drm_rect *rects)
2191 {
2192 	struct drm_device *dev = &dev_priv->drm;
2193 	struct vmw_display_unit *du;
2194 	struct drm_connector *con;
2195 	struct drm_connector_list_iter conn_iter;
2196 	struct drm_modeset_acquire_ctx ctx;
2197 	struct drm_crtc *crtc;
2198 	int ret;
2199 
2200 	/* Currently gui_x/y is protected with the crtc mutex */
2201 	mutex_lock(&dev->mode_config.mutex);
2202 	drm_modeset_acquire_init(&ctx, 0);
2203 retry:
2204 	drm_for_each_crtc(crtc, dev) {
2205 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2206 		if (ret < 0) {
2207 			if (ret == -EDEADLK) {
2208 				drm_modeset_backoff(&ctx);
2209 				goto retry;
2210 			}
2211 			goto out_fini;
2212 		}
2213 	}
2214 
2215 	drm_connector_list_iter_begin(dev, &conn_iter);
2216 	drm_for_each_connector_iter(con, &conn_iter) {
2217 		du = vmw_connector_to_du(con);
2218 		if (num_rects > du->unit) {
2219 			du->pref_width = drm_rect_width(&rects[du->unit]);
2220 			du->pref_height = drm_rect_height(&rects[du->unit]);
2221 			du->pref_active = true;
2222 			du->gui_x = rects[du->unit].x1;
2223 			du->gui_y = rects[du->unit].y1;
2224 		} else {
2225 			du->pref_width = 800;
2226 			du->pref_height = 600;
2227 			du->pref_active = false;
2228 			du->gui_x = 0;
2229 			du->gui_y = 0;
2230 		}
2231 	}
2232 	drm_connector_list_iter_end(&conn_iter);
2233 
2234 	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2235 		du = vmw_connector_to_du(con);
2236 		if (num_rects > du->unit) {
2237 			drm_object_property_set_value
2238 			  (&con->base, dev->mode_config.suggested_x_property,
2239 			   du->gui_x);
2240 			drm_object_property_set_value
2241 			  (&con->base, dev->mode_config.suggested_y_property,
2242 			   du->gui_y);
2243 		} else {
2244 			drm_object_property_set_value
2245 			  (&con->base, dev->mode_config.suggested_x_property,
2246 			   0);
2247 			drm_object_property_set_value
2248 			  (&con->base, dev->mode_config.suggested_y_property,
2249 			   0);
2250 		}
2251 		con->status = vmw_du_connector_detect(con, true);
2252 	}
2253 
2254 	drm_sysfs_hotplug_event(dev);
2255 out_fini:
2256 	drm_modeset_drop_locks(&ctx);
2257 	drm_modeset_acquire_fini(&ctx);
2258 	mutex_unlock(&dev->mode_config.mutex);
2259 
2260 	return 0;
2261 }
2262 
2263 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2264 			  u16 *r, u16 *g, u16 *b,
2265 			  uint32_t size,
2266 			  struct drm_modeset_acquire_ctx *ctx)
2267 {
2268 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2269 	int i;
2270 
2271 	for (i = 0; i < size; i++) {
2272 		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2273 			  r[i], g[i], b[i]);
2274 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2275 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2276 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2277 	}
2278 
2279 	return 0;
2280 }
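
/*
 * The legacy SVGA palette written above is laid out as consecutive
 * R, G, B registers per entry, i.e. entry i lives at
 * SVGA_PALETTE_BASE + i * 3. Only the high byte of each 16-bit DRM
 * gamma component is used, so a component value of 0xffff is written
 * to the device as 0xff.
 */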
2281 
2282 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2283 {
2284 	return 0;
2285 }
2286 
2287 enum drm_connector_status
2288 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2289 {
2290 	uint32_t num_displays;
2291 	struct drm_device *dev = connector->dev;
2292 	struct vmw_private *dev_priv = vmw_priv(dev);
2293 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2294 
2295 	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2296 
2297 	return ((vmw_connector_to_du(connector)->unit < num_displays &&
2298 		 du->pref_active) ?
2299 		connector_status_connected : connector_status_disconnected);
2300 }
2301 
2302 static struct drm_display_mode vmw_kms_connector_builtin[] = {
2303 	/* 640x480@60Hz */
2304 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2305 		   752, 800, 0, 480, 489, 492, 525, 0,
2306 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2307 	/* 800x600@60Hz */
2308 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2309 		   968, 1056, 0, 600, 601, 605, 628, 0,
2310 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2311 	/* 1024x768@60Hz */
2312 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2313 		   1184, 1344, 0, 768, 771, 777, 806, 0,
2314 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2315 	/* 1152x864@75Hz */
2316 	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2317 		   1344, 1600, 0, 864, 865, 868, 900, 0,
2318 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2319 	/* 1280x720@60Hz */
2320 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
2321 		   1472, 1664, 0, 720, 723, 728, 748, 0,
2322 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2323 	/* 1280x768@60Hz */
2324 	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2325 		   1472, 1664, 0, 768, 771, 778, 798, 0,
2326 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2327 	/* 1280x800@60Hz */
2328 	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2329 		   1480, 1680, 0, 800, 803, 809, 831, 0,
2330 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2331 	/* 1280x960@60Hz */
2332 	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2333 		   1488, 1800, 0, 960, 961, 964, 1000, 0,
2334 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2335 	/* 1280x1024@60Hz */
2336 	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2337 		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2338 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2339 	/* 1360x768@60Hz */
2340 	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2341 		   1536, 1792, 0, 768, 771, 777, 795, 0,
2342 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2343 	/* 1400x1050@60Hz */
2344 	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2345 		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2346 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2347 	/* 1440x900@60Hz */
2348 	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2349 		   1672, 1904, 0, 900, 903, 909, 934, 0,
2350 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2351 	/* 1600x1200@60Hz */
2352 	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2353 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2354 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2355 	/* 1680x1050@60Hz */
2356 	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2357 		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2358 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2359 	/* 1792x1344@60Hz */
2360 	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2361 		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2362 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2363 	/* 1856x1392@60Hz */
2364 	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2365 		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2366 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2367 	/* 1920x1080@60Hz */
2368 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2369 		   2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
2370 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2371 	/* 1920x1200@60Hz */
2372 	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2373 		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2374 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2375 	/* 1920x1440@60Hz */
2376 	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2377 		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2378 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2379 	/* 2560x1440@60Hz */
2380 	{ DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2381 		   2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
2382 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2383 	/* 2560x1600@60Hz */
2384 	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2385 		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2386 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2387 	/* 2880x1800@60Hz */
2388 	{ DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2389 		   2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
2390 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2391 	/* 3840x2160@60Hz */
2392 	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
2393 		   3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
2394 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2395 	/* 3840x2400@60Hz */
2396 	{ DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
2397 		   3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
2398 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2399 	/* Terminate */
2400 	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2401 };
2402 
2403 /**
2404  * vmw_guess_mode_timing - Provide fake timings for a
2405  * 60Hz vrefresh mode.
2406  *
2407  * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2408  * members filled in.
2409  */
2410 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2411 {
2412 	mode->hsync_start = mode->hdisplay + 50;
2413 	mode->hsync_end = mode->hsync_start + 50;
2414 	mode->htotal = mode->hsync_end + 50;
2415 
2416 	mode->vsync_start = mode->vdisplay + 50;
2417 	mode->vsync_end = mode->vsync_start + 50;
2418 	mode->vtotal = mode->vsync_end + 50;
2419 
2420 	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2421 }
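
/*
 * Worked example of the heuristic above (illustrative numbers only):
 * for a 1024x768 mode it produces htotal = 1024 + 150 = 1174 and
 * vtotal = 768 + 150 = 918, so
 *
 *	clock = 1174 * 918 / 100 * 6 = 64662 kHz
 *
 * which corresponds to 64662000 / (1174 * 918) ~= 60 Hz vrefresh.
 */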
2422 
2423 
2424 int vmw_du_connector_fill_modes(struct drm_connector *connector,
2425 				uint32_t max_width, uint32_t max_height)
2426 {
2427 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2428 	struct drm_device *dev = connector->dev;
2429 	struct vmw_private *dev_priv = vmw_priv(dev);
2430 	struct drm_display_mode *mode = NULL;
2431 	struct drm_display_mode *bmode;
2432 	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2433 		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2434 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2435 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2436 	};
2437 	int i;
2438 	u32 assumed_bpp = 4;
2439 
2440 	if (dev_priv->assume_16bpp)
2441 		assumed_bpp = 2;
2442 
2443 	max_width  = min(max_width,  dev_priv->texture_max_width);
2444 	max_height = min(max_height, dev_priv->texture_max_height);
2445 
2446 	/*
2447 	 * For STDU, an individual mode is additionally limited by the
2448 	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
2449 	 */
2450 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2451 		max_width  = min(max_width,  dev_priv->stdu_max_width);
2452 		max_height = min(max_height, dev_priv->stdu_max_height);
2453 	}
2454 
2455 	/* Add preferred mode */
2456 	mode = drm_mode_duplicate(dev, &prefmode);
2457 	if (!mode)
2458 		return 0;
2459 	mode->hdisplay = du->pref_width;
2460 	mode->vdisplay = du->pref_height;
2461 	vmw_guess_mode_timing(mode);
2462 	drm_mode_set_name(mode);
2463 
2464 	if (vmw_kms_validate_mode_vram(dev_priv,
2465 					mode->hdisplay * assumed_bpp,
2466 					mode->vdisplay)) {
2467 		drm_mode_probed_add(connector, mode);
2468 	} else {
2469 		drm_mode_destroy(dev, mode);
2470 		mode = NULL;
2471 	}
2472 
2473 	if (du->pref_mode) {
2474 		list_del_init(&du->pref_mode->head);
2475 		drm_mode_destroy(dev, du->pref_mode);
2476 	}
2477 
2478 	/* mode might be null here, this is intended */
2479 	du->pref_mode = mode;
2480 
2481 	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
2482 		bmode = &vmw_kms_connector_builtin[i];
2483 		if (bmode->hdisplay > max_width ||
2484 		    bmode->vdisplay > max_height)
2485 			continue;
2486 
2487 		if (!vmw_kms_validate_mode_vram(dev_priv,
2488 						bmode->hdisplay * assumed_bpp,
2489 						bmode->vdisplay))
2490 			continue;
2491 
2492 		mode = drm_mode_duplicate(dev, bmode);
2493 		if (!mode)
2494 			return 0;
2495 
2496 		drm_mode_probed_add(connector, mode);
2497 	}
2498 
2499 	drm_connector_list_update(connector);
2500 	/* Move the preferred mode first; this helps apps pick the right mode. */
2501 	drm_mode_sort(&connector->modes);
2502 
2503 	return 1;
2504 }
2505 
2506 /**
2507  * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2508  * @dev: drm device for the ioctl
2509  * @data: data pointer for the ioctl
2510  * @file_priv: drm file for the ioctl call
2511  *
2512  * Update preferred topology of display unit as per ioctl request. The topology
2513  * is expressed as array of drm_vmw_rect.
2514  * e.g.
2515  * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2516  *
2517  * NOTE:
2518  * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
2519  * Besides the device limit on topology, x + w and y + h (lower right)
2520  * cannot be greater than INT_MAX; topologies beyond these limits are rejected.
2521  *
2522  * Returns:
2523  * Zero on success, negative errno on failure.
2524  */
2525 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2526 				struct drm_file *file_priv)
2527 {
2528 	struct vmw_private *dev_priv = vmw_priv(dev);
2529 	struct drm_mode_config *mode_config = &dev->mode_config;
2530 	struct drm_vmw_update_layout_arg *arg =
2531 		(struct drm_vmw_update_layout_arg *)data;
2532 	void __user *user_rects;
2533 	struct drm_vmw_rect *rects;
2534 	struct drm_rect *drm_rects;
2535 	unsigned rects_size;
2536 	int ret, i;
2537 
2538 	if (!arg->num_outputs) {
2539 		struct drm_rect def_rect = {0, 0, 800, 600};
2540 		VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
2541 			      def_rect.x1, def_rect.y1,
2542 			      def_rect.x2, def_rect.y2);
2543 		vmw_du_update_layout(dev_priv, 1, &def_rect);
2544 		return 0;
2545 	}
2546 
2547 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2548 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2549 			GFP_KERNEL);
2550 	if (unlikely(!rects))
2551 		return -ENOMEM;
2552 
2553 	user_rects = (void __user *)(unsigned long)arg->rects;
2554 	ret = copy_from_user(rects, user_rects, rects_size);
2555 	if (unlikely(ret != 0)) {
2556 		DRM_ERROR("Failed to get rects.\n");
2557 		ret = -EFAULT;
2558 		goto out_free;
2559 	}
2560 
2561 	drm_rects = (struct drm_rect *)rects;
2562 
2563 	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2564 	for (i = 0; i < arg->num_outputs; i++) {
2565 		struct drm_vmw_rect curr_rect;
2566 
2567 		/* Verify user-space rects for overflow, as the kernel uses drm_rect */
2568 		if ((rects[i].x + rects[i].w > INT_MAX) ||
2569 		    (rects[i].y + rects[i].h > INT_MAX)) {
2570 			ret = -ERANGE;
2571 			goto out_free;
2572 		}
2573 
2574 		curr_rect = rects[i];
2575 		drm_rects[i].x1 = curr_rect.x;
2576 		drm_rects[i].y1 = curr_rect.y;
2577 		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2578 		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2579 
2580 		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2581 			      drm_rects[i].x1, drm_rects[i].y1,
2582 			      drm_rects[i].x2, drm_rects[i].y2);
2583 
2584 		/*
2585 		 * Currently this check limits the topology to
2586 		 * mode_config->max_width/height (which is in fact the max
2587 		 * texture size supported by the virtual device). The limit is
2588 		 * here to address window managers that create a big
2589 		 * framebuffer for the whole topology.
2590 		 */
2591 		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2592 		    drm_rects[i].x2 > mode_config->max_width ||
2593 		    drm_rects[i].y2 > mode_config->max_height) {
2594 			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2595 				      drm_rects[i].x1, drm_rects[i].y1,
2596 				      drm_rects[i].x2, drm_rects[i].y2);
2597 			ret = -EINVAL;
2598 			goto out_free;
2599 		}
2600 	}
2601 
2602 	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2603 
2604 	if (ret == 0)
2605 		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2606 
2607 out_free:
2608 	kfree(rects);
2609 	return ret;
2610 }
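
/*
 * A minimal user-space sketch of invoking this ioctl through libdrm
 * (assuming the drm_vmw_rect and drm_vmw_update_layout_arg definitions
 * from vmwgfx_drm.h and an already-opened DRM fd; illustrative only):
 *
 *	struct drm_vmw_rect rects[2] = {
 *		{ .x = 0,   .y = 0, .w = 640, .h = 480 },
 *		{ .x = 640, .y = 0, .w = 800, .h = 600 },
 *	};
 *	struct drm_vmw_update_layout_arg arg = {
 *		.num_outputs = 2,
 *		.rects = (__u64)(unsigned long)rects,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg));
 */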
2611 
2612 /**
2613  * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2614  * on a set of cliprects and a set of display units.
2615  *
2616  * @dev_priv: Pointer to a device private structure.
2617  * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2618  * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2619  * Cliprects are given in framebuffer coordinates.
2620  * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2621  * be NULL. Cliprects are given in source coordinates.
2622  * @dest_x: X coordinate offset for the crtc / destination clip rects.
2623  * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2624  * @num_clips: Number of cliprects in the @clips or @vclips array.
2625  * @increment: Integer with which to increment the clip counter when looping.
2626  * Used to skip a predetermined number of clip rects.
2627  * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2628  */
2629 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2630 			 struct vmw_framebuffer *framebuffer,
2631 			 const struct drm_clip_rect *clips,
2632 			 const struct drm_vmw_rect *vclips,
2633 			 s32 dest_x, s32 dest_y,
2634 			 int num_clips,
2635 			 int increment,
2636 			 struct vmw_kms_dirty *dirty)
2637 {
2638 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2639 	struct drm_crtc *crtc;
2640 	u32 num_units = 0;
2641 	u32 i, k;
2642 
2643 	dirty->dev_priv = dev_priv;
2644 
2645 	/* If crtc is passed, no need to iterate over other display units */
2646 	if (dirty->crtc) {
2647 		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2648 	} else {
2649 		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2650 				    head) {
2651 			struct drm_plane *plane = crtc->primary;
2652 
2653 			if (plane->state->fb == &framebuffer->base)
2654 				units[num_units++] = vmw_crtc_to_du(crtc);
2655 		}
2656 	}
2657 
2658 	for (k = 0; k < num_units; k++) {
2659 		struct vmw_display_unit *unit = units[k];
2660 		s32 crtc_x = unit->crtc.x;
2661 		s32 crtc_y = unit->crtc.y;
2662 		s32 crtc_width = unit->crtc.mode.hdisplay;
2663 		s32 crtc_height = unit->crtc.mode.vdisplay;
2664 		const struct drm_clip_rect *clips_ptr = clips;
2665 		const struct drm_vmw_rect *vclips_ptr = vclips;
2666 
2667 		dirty->unit = unit;
2668 		if (dirty->fifo_reserve_size > 0) {
2669 			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2670 						      dirty->fifo_reserve_size);
2671 			if (!dirty->cmd)
2672 				return -ENOMEM;
2673 
2674 			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2675 		}
2676 		dirty->num_hits = 0;
2677 		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2678 		       vclips_ptr += increment) {
2679 			s32 clip_left;
2680 			s32 clip_top;
2681 
2682 			/*
2683 			 * Select clip array type. Note that integer type
2684 			 * in @clips is unsigned short, whereas in @vclips
2685 			 * it's 32-bit.
2686 			 */
2687 			if (clips) {
2688 				dirty->fb_x = (s32) clips_ptr->x1;
2689 				dirty->fb_y = (s32) clips_ptr->y1;
2690 				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2691 					crtc_x;
2692 				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2693 					crtc_y;
2694 			} else {
2695 				dirty->fb_x = vclips_ptr->x;
2696 				dirty->fb_y = vclips_ptr->y;
2697 				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2698 					dest_x - crtc_x;
2699 				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2700 					dest_y - crtc_y;
2701 			}
2702 
2703 			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2704 			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2705 
2706 			/* Skip this clip if it's outside the crtc region */
2707 			if (dirty->unit_x1 >= crtc_width ||
2708 			    dirty->unit_y1 >= crtc_height ||
2709 			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2710 				continue;
2711 
2712 			/* Clip right and bottom to crtc limits */
2713 			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2714 					       crtc_width);
2715 			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2716 					       crtc_height);
2717 
2718 			/* Clip left and top to crtc limits */
2719 			clip_left = min_t(s32, dirty->unit_x1, 0);
2720 			clip_top = min_t(s32, dirty->unit_y1, 0);
2721 			dirty->unit_x1 -= clip_left;
2722 			dirty->unit_y1 -= clip_top;
2723 			dirty->fb_x -= clip_left;
2724 			dirty->fb_y -= clip_top;
2725 
2726 			dirty->clip(dirty);
2727 		}
2728 
2729 		dirty->fifo_commit(dirty);
2730 	}
2731 
2732 	return 0;
2733 }
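
/*
 * A rough sketch of how a display unit drives the helper above (the
 * callback names here are hypothetical; the actual command layout is
 * unit-specific). The closure embeds struct vmw_kms_dirty and supplies
 * the callbacks the helper invokes per clip and per unit:
 *
 *	struct vmw_kms_dirty dirty = {};
 *
 *	dirty.fifo_reserve_size = per_clip_cmd_size * num_clips;
 *	dirty.clip = my_unit_clip;		// emit one command per clip
 *	dirty.fifo_commit = my_unit_commit;	// commit dirty->num_hits cmds
 *	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
 *				   0, 0, num_clips, 1, &dirty);
 */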
2734 
2735 /**
2736  * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2737  * cleanup and fencing
2738  * @dev_priv: Pointer to the device-private struct
2739  * @file_priv: Pointer identifying the client when user-space fencing is used
2740  * @ctx: Pointer to the validation context
2741  * @out_fence: If non-NULL, returned refcounted fence-pointer
2742  * @user_fence_rep: If non-NULL, pointer to user-space address area
2743  * in which to copy user-space fence info
2744  */
2745 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2746 				      struct drm_file *file_priv,
2747 				      struct vmw_validation_context *ctx,
2748 				      struct vmw_fence_obj **out_fence,
2749 				      struct drm_vmw_fence_rep __user *
2750 				      user_fence_rep)
2751 {
2752 	struct vmw_fence_obj *fence = NULL;
2753 	uint32_t handle = 0;
2754 	int ret = 0;
2755 
2756 	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2757 	    out_fence)
2758 		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2759 						 file_priv ? &handle : NULL);
2760 	vmw_validation_done(ctx, fence);
2761 	if (file_priv)
2762 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2763 					    ret, user_fence_rep, fence,
2764 					    handle, -1);
2765 	if (out_fence)
2766 		*out_fence = fence;
2767 	else
2768 		vmw_fence_obj_unreference(&fence);
2769 }
2770 
2771 /**
2772  * vmw_kms_update_proxy - Helper function to update a proxy surface from
2773  * its backing MOB.
2774  *
2775  * @res: Pointer to the surface resource
2776  * @clips: Clip rects in framebuffer (surface) space.
2777  * @num_clips: Number of clips in @clips.
2778  * @increment: Integer with which to increment the clip counter when looping.
2779  * Used to skip a predetermined number of clip rects.
2780  *
2781  * This function makes sure the proxy surface is updated from its backing MOB
2782  * using the region given by @clips. The surface resource @res and its backing
2783  * MOB need to be reserved and validated on call.
2784  */
2785 int vmw_kms_update_proxy(struct vmw_resource *res,
2786 			 const struct drm_clip_rect *clips,
2787 			 unsigned num_clips,
2788 			 int increment)
2789 {
2790 	struct vmw_private *dev_priv = res->dev_priv;
2791 	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2792 	struct {
2793 		SVGA3dCmdHeader header;
2794 		SVGA3dCmdUpdateGBImage body;
2795 	} *cmd;
2796 	SVGA3dBox *box;
2797 	size_t copy_size = 0;
2798 	int i;
2799 
2800 	if (!clips)
2801 		return 0;
2802 
2803 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2804 	if (!cmd)
2805 		return -ENOMEM;
2806 
2807 	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2808 		box = &cmd->body.box;
2809 
2810 		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2811 		cmd->header.size = sizeof(cmd->body);
2812 		cmd->body.image.sid = res->id;
2813 		cmd->body.image.face = 0;
2814 		cmd->body.image.mipmap = 0;
2815 
2816 		if (clips->x1 > size->width || clips->x2 > size->width ||
2817 		    clips->y1 > size->height || clips->y2 > size->height) {
2818 			DRM_ERROR("Invalid clips outside of framebuffer.\n");
2819 			return -EINVAL;
2820 		}
2821 
2822 		box->x = clips->x1;
2823 		box->y = clips->y1;
2824 		box->z = 0;
2825 		box->w = clips->x2 - clips->x1;
2826 		box->h = clips->y2 - clips->y1;
2827 		box->d = 1;
2828 
2829 		copy_size += sizeof(*cmd);
2830 	}
2831 
2832 	vmw_cmd_commit(dev_priv, copy_size);
2833 
2834 	return 0;
2835 }
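
/*
 * Example of the clip-to-box conversion above (illustrative numbers
 * only): a clip rect of x1 = 16, y1 = 32, x2 = 116, y2 = 132 becomes
 * the SVGA3dBox { .x = 16, .y = 32, .z = 0, .w = 100, .h = 100, .d = 1 },
 * i.e. a one-deep 2D region in mipmap 0 of face 0 of the surface.
 */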
2836 
2837 int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
2838 			    unsigned unit,
2839 			    u32 max_width,
2840 			    u32 max_height,
2841 			    struct drm_connector **p_con,
2842 			    struct drm_crtc **p_crtc,
2843 			    struct drm_display_mode **p_mode)
2844 {
2845 	struct drm_connector *con;
2846 	struct vmw_display_unit *du;
2847 	struct drm_display_mode *mode;
2848 	int i = 0;
2849 	int ret = 0;
2850 
2851 	mutex_lock(&dev_priv->drm.mode_config.mutex);
2852 	list_for_each_entry(con, &dev_priv->drm.mode_config.connector_list,
2853 			    head) {
2854 		if (i == unit)
2855 			break;
2856 
2857 		++i;
2858 	}
2859 
2860 	if (&con->head == &dev_priv->drm.mode_config.connector_list) {
2861 		DRM_ERROR("Could not find initial display unit.\n");
2862 		ret = -EINVAL;
2863 		goto out_unlock;
2864 	}
2865 
2866 	if (list_empty(&con->modes))
2867 		(void) vmw_du_connector_fill_modes(con, max_width, max_height);
2868 
2869 	if (list_empty(&con->modes)) {
2870 		DRM_ERROR("Could not find initial display mode.\n");
2871 		ret = -EINVAL;
2872 		goto out_unlock;
2873 	}
2874 
2875 	du = vmw_connector_to_du(con);
2876 	*p_con = con;
2877 	*p_crtc = &du->crtc;
2878 
2879 	list_for_each_entry(mode, &con->modes, head) {
2880 		if (mode->type & DRM_MODE_TYPE_PREFERRED)
2881 			break;
2882 	}
2883 
2884 	if (&mode->head == &con->modes) {
2885 		WARN_ONCE(true, "Could not find initial preferred mode.\n");
2886 		*p_mode = list_first_entry(&con->modes,
2887 					   struct drm_display_mode,
2888 					   head);
2889 	} else {
2890 		*p_mode = mode;
2891 	}
2892 
2893  out_unlock:
2894 	mutex_unlock(&dev_priv->drm.mode_config.mutex);
2895 
2896 	return ret;
2897 }
2898 
2899 /**
2900  * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2901  * property.
2902  *
2903  * @dev_priv: Pointer to a device private struct.
2904  *
2905  * Sets up the implicit placement property unless it's already set up.
2906  */
2907 void
2908 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2909 {
2910 	if (dev_priv->implicit_placement_property)
2911 		return;
2912 
2913 	dev_priv->implicit_placement_property =
2914 		drm_property_create_range(&dev_priv->drm,
2915 					  DRM_MODE_PROP_IMMUTABLE,
2916 					  "implicit_placement", 0, 1);
2917 }
2918 
2919 /**
2920  * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2921  *
2922  * @dev: Pointer to the drm device
2923  * Return: 0 on success. Negative error code on failure.
2924  */
2925 int vmw_kms_suspend(struct drm_device *dev)
2926 {
2927 	struct vmw_private *dev_priv = vmw_priv(dev);
2928 
2929 	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2930 	if (IS_ERR(dev_priv->suspend_state)) {
2931 		int ret = PTR_ERR(dev_priv->suspend_state);
2932 
2933 		DRM_ERROR("Failed kms suspend: %d\n", ret);
2934 		dev_priv->suspend_state = NULL;
2935 
2936 		return ret;
2937 	}
2938 
2939 	return 0;
2940 }
2941 
2942 
2943 /**
2944  * vmw_kms_resume - Re-enable modesetting and restore state
2945  *
2946  * @dev: Pointer to the drm device
2947  * Return: 0 on success. Negative error code on failure.
2948  *
2949  * State is resumed from a previous vmw_kms_suspend(). It's illegal
2950  * to call this function without a previous vmw_kms_suspend().
2951  */
2952 int vmw_kms_resume(struct drm_device *dev)
2953 {
2954 	struct vmw_private *dev_priv = vmw_priv(dev);
2955 	int ret;
2956 
2957 	if (WARN_ON(!dev_priv->suspend_state))
2958 		return 0;
2959 
2960 	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2961 	dev_priv->suspend_state = NULL;
2962 
2963 	return ret;
2964 }
2965 
2966 /**
2967  * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2968  *
2969  * @dev: Pointer to the drm device
2970  */
2971 void vmw_kms_lost_device(struct drm_device *dev)
2972 {
2973 	drm_atomic_helper_shutdown(dev);
2974 }
2975 
2976 /**
2977  * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2978  * @update: The closure structure.
2979  *
2980  * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
2981  * update on display unit.
2982  *
2983  * Return: 0 on success or a negative error code on failure.
2984  */
2985 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2986 {
2987 	struct drm_plane_state *state = update->plane->state;
2988 	struct drm_plane_state *old_state = update->old_state;
2989 	struct drm_atomic_helper_damage_iter iter;
2990 	struct drm_rect clip;
2991 	struct drm_rect bb;
2992 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2993 	uint32_t reserved_size = 0;
2994 	uint32_t submit_size = 0;
2995 	uint32_t curr_size = 0;
2996 	uint32_t num_hits = 0;
2997 	void *cmd_start;
2998 	char *cmd_next;
2999 	int ret;
3000 
3001 	/*
3002 	 * Iterate in advance to check whether a plane update is really needed
3003 	 * and to count the clips within the plane src, for fifo allocation.
3004 	 */
3005 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
3006 	drm_atomic_for_each_plane_damage(&iter, &clip)
3007 		num_hits++;
3008 
3009 	if (num_hits == 0)
3010 		return 0;
3011 
3012 	if (update->vfb->bo) {
3013 		struct vmw_framebuffer_bo *vfbbo =
3014 			container_of(update->vfb, typeof(*vfbbo), base);
3015 
3016 		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
3017 					    update->cpu_blit);
3018 	} else {
3019 		struct vmw_framebuffer_surface *vfbs =
3020 			container_of(update->vfb, typeof(*vfbs), base);
3021 
3022 		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
3023 						  0, VMW_RES_DIRTY_NONE, NULL,
3024 						  NULL);
3025 	}
3026 
3027 	if (ret)
3028 		return ret;
3029 
3030 	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
3031 	if (ret)
3032 		goto out_unref;
3033 
3034 	reserved_size = update->calc_fifo_size(update, num_hits);
3035 	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
3036 	if (!cmd_start) {
3037 		ret = -ENOMEM;
3038 		goto out_revert;
3039 	}
3040 
3041 	cmd_next = cmd_start;
3042 
3043 	if (update->post_prepare) {
3044 		curr_size = update->post_prepare(update, cmd_next);
3045 		cmd_next += curr_size;
3046 		submit_size += curr_size;
3047 	}
3048 
3049 	if (update->pre_clip) {
3050 		curr_size = update->pre_clip(update, cmd_next, num_hits);
3051 		cmd_next += curr_size;
3052 		submit_size += curr_size;
3053 	}
3054 
3055 	bb.x1 = INT_MAX;
3056 	bb.y1 = INT_MAX;
3057 	bb.x2 = INT_MIN;
3058 	bb.y2 = INT_MIN;
3059 
3060 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
3061 	drm_atomic_for_each_plane_damage(&iter, &clip) {
3062 		uint32_t fb_x = clip.x1;
3063 		uint32_t fb_y = clip.y1;
3064 
3065 		vmw_du_translate_to_crtc(state, &clip);
3066 		if (update->clip) {
3067 			curr_size = update->clip(update, cmd_next, &clip, fb_x,
3068 						 fb_y);
3069 			cmd_next += curr_size;
3070 			submit_size += curr_size;
3071 		}
3072 		bb.x1 = min_t(int, bb.x1, clip.x1);
3073 		bb.y1 = min_t(int, bb.y1, clip.y1);
3074 		bb.x2 = max_t(int, bb.x2, clip.x2);
3075 		bb.y2 = max_t(int, bb.y2, clip.y2);
3076 	}
3077 
3078 	curr_size = update->post_clip(update, cmd_next, &bb);
3079 	submit_size += curr_size;
3080 
3081 	if (reserved_size < submit_size)
3082 		submit_size = 0;
3083 
3084 	vmw_cmd_commit(update->dev_priv, submit_size);
3085 
3086 	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
3087 					 update->out_fence, NULL);
3088 	return ret;
3089 
3090 out_revert:
3091 	vmw_validation_revert(&val_ctx);
3092 
3093 out_unref:
3094 	vmw_validation_unref_lists(&val_ctx);
3095 	return ret;
3096 }
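
/*
 * Callback order implemented by vmw_du_helper_plane_update() above:
 *
 *	1. calc_fifo_size(update, num_hits)	- size the FIFO reservation
 *	2. post_prepare(update, cmd)		- optional, emitted once
 *	3. pre_clip(update, cmd, num_hits)	- optional, emitted once
 *	4. clip(update, cmd, &clip, fb_x, fb_y)	- once per damage rect
 *	5. post_clip(update, cmd, &bb)		- once, with the bounding box
 *
 * Each callback returns the number of command bytes it wrote; the helper
 * uses that to advance the command pointer and to compute submit_size.
 */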
3097