/*
 * Copyright (C) 2013-2017 Oracle Corporation
 * This file is based on ast_main.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Dave Airlie <airlied@redhat.com>,
 *          Michael Thayer <michael.thayer@oracle.com>,
 *          Hans de Goede <hdegoede@redhat.com>
 */
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include "vbox_drv.h"
#include "vbox_err.h"
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"

static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);

	if (vbox_fb->obj)
		drm_gem_object_put_unlocked(vbox_fb->obj);

	drm_framebuffer_cleanup(fb);
	kfree(fb);
}

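/**
 * Enable VBVA (VirtualBox Video Acceleration) for every screen which does
 * not have it enabled yet, using the per-screen command buffers that
 * vbox_accel_init() reserved at the end of usable VRAM.
 */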
void vbox_enable_accel(struct vbox_private *vbox)
{
	unsigned int i;
	struct vbva_buffer *vbva;

	if (!vbox->vbva_info || !vbox->vbva_buffers) {
		/* Should never happen... */
		DRM_ERROR("vboxvideo: failed to set up VBVA.\n");
		return;
	}

	for (i = 0; i < vbox->num_crtcs; ++i) {
		if (vbox->vbva_info[i].vbva)
			continue;

		vbva = (void __force *)vbox->vbva_buffers +
			i * VBVA_MIN_BUFFER_SIZE;
		if (!vbva_enable(&vbox->vbva_info[i],
				 vbox->guest_pool, vbva, i)) {
			/* Very old host or driver error. */
			DRM_ERROR("vboxvideo: vbva_enable failed\n");
			return;
		}
	}
}

void vbox_disable_accel(struct vbox_private *vbox)
{
	unsigned int i;

	for (i = 0; i < vbox->num_crtcs; ++i)
		vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i);
}

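/**
 * Report the guest's graphics capabilities to the host.  Video mode hint
 * support is only advertised once the initial mode query has completed.
 */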
void vbox_report_caps(struct vbox_private *vbox)
{
	u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
		   VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;

	if (vbox->initial_mode_queried)
		caps |= VBVACAPS_VIDEO_MODE_HINTS;

	hgsmi_send_caps_info(vbox->guest_pool, caps);
}

/**
 * Send information about dirty rectangles to VBVA.  If necessary we enable
 * VBVA first, as this is normally disabled after a change of master in case
 * the new master does not send dirty rectangle information (is this even
 * allowed?)
 */
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
				       struct drm_clip_rect *rects,
				       unsigned int num_rects)
{
	struct vbox_private *vbox = fb->dev->dev_private;
	struct drm_crtc *crtc;
	unsigned int i;

	mutex_lock(&vbox->hw_mutex);
	list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
		if (CRTC_FB(crtc) != fb)
			continue;

		vbox_enable_accel(vbox);

		for (i = 0; i < num_rects; ++i) {
			struct vbva_cmd_hdr cmd_hdr;
			unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;

			if ((rects[i].x1 > crtc->x + crtc->hwmode.hdisplay) ||
			    (rects[i].y1 > crtc->y + crtc->hwmode.vdisplay) ||
			    (rects[i].x2 < crtc->x) ||
			    (rects[i].y2 < crtc->y))
				continue;

			cmd_hdr.x = (s16)rects[i].x1;
			cmd_hdr.y = (s16)rects[i].y1;
			cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
			cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;

			if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
						      vbox->guest_pool))
				continue;

			vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
				   &cmd_hdr, sizeof(cmd_hdr));
			vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
		}
	}
	mutex_unlock(&vbox->hw_mutex);
}

static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
				       struct drm_file *file_priv,
				       unsigned int flags, unsigned int color,
				       struct drm_clip_rect *rects,
				       unsigned int num_rects)
{
	vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);

	return 0;
}

static const struct drm_framebuffer_funcs vbox_fb_funcs = {
	.destroy = vbox_user_framebuffer_destroy,
	.dirty = vbox_user_framebuffer_dirty,
};

int vbox_framebuffer_init(struct drm_device *dev,
			  struct vbox_framebuffer *vbox_fb,
			  const struct DRM_MODE_FB_CMD *mode_cmd,
			  struct drm_gem_object *obj)
{
	int ret;

	drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
	vbox_fb->obj = obj;
	ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	return 0;
}

static struct drm_framebuffer *vbox_user_framebuffer_create(
		struct drm_device *dev,
		struct drm_file *filp,
		const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct vbox_framebuffer *vbox_fb;
	int ret = -ENOMEM;

	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
	if (!vbox_fb)
		goto err_unref_obj;

	ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
	if (ret)
		goto err_free_vbox_fb;

	return &vbox_fb->base;

err_free_vbox_fb:
	kfree(vbox_fb);
err_unref_obj:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static const struct drm_mode_config_funcs vbox_mode_funcs = {
	.fb_create = vbox_user_framebuffer_create,
};

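/**
 * Reserve and map one VBVA command buffer per screen at the end of usable
 * VRAM, directly below the guest heap, so VRAM ends up laid out as:
 * framebuffer memory, then the per-screen command buffers, then the heap.
 */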
static int vbox_accel_init(struct vbox_private *vbox)
{
	unsigned int i;

	vbox->vbva_info = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
				       sizeof(*vbox->vbva_info), GFP_KERNEL);
	if (!vbox->vbva_info)
		return -ENOMEM;

	/* Take a command buffer for each screen from the end of usable VRAM. */
	vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;

	vbox->vbva_buffers = pci_iomap_range(vbox->dev->pdev, 0,
					     vbox->available_vram_size,
					     vbox->num_crtcs *
					     VBVA_MIN_BUFFER_SIZE);
	if (!vbox->vbva_buffers)
		return -ENOMEM;

	for (i = 0; i < vbox->num_crtcs; ++i)
		vbva_setup_buffer_context(&vbox->vbva_info[i],
					  vbox->available_vram_size +
					  i * VBVA_MIN_BUFFER_SIZE,
					  VBVA_MIN_BUFFER_SIZE);

	return 0;
}

static void vbox_accel_fini(struct vbox_private *vbox)
{
	vbox_disable_accel(vbox);
	pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
}

/** Do we support the 4.3 plus mode hint reporting interface? */
static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
{
	u32 have_hints, have_cursor;
	int ret;

	ret = hgsmi_query_conf(vbox->guest_pool,
			       VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
			       &have_hints);
	if (ret)
		return false;

	ret = hgsmi_query_conf(vbox->guest_pool,
			       VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
			       &have_cursor);
	if (ret)
		return false;

	return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
}

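/**
 * Check whether the host supports a given DISPI interface version by
 * writing the ID to the index register and reading it back: a supported
 * ID is echoed back unchanged.
 */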
static bool vbox_check_supported(u16 id)
{
	u16 dispi_id;

	vbox_write_ioport(VBE_DISPI_INDEX_ID, id);
	dispi_id = inw(VBE_DISPI_IOPORT_DATA);

	return dispi_id == id;
}

/**
 * Set up our heaps and data exchange buffers in VRAM before handing the rest
 * to the memory manager.
 */
static int vbox_hw_init(struct vbox_private *vbox)
{
	int ret = -ENOMEM;

	vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
	vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX);

	DRM_INFO("VRAM %08x\n", vbox->full_vram_size);

	/* Map guest-heap at end of vram */
	vbox->guest_heap =
	    pci_iomap_range(vbox->dev->pdev, 0, GUEST_HEAP_OFFSET(vbox),
			    GUEST_HEAP_SIZE);
	if (!vbox->guest_heap)
		return -ENOMEM;

	/* Create guest-heap mem-pool using 2^4 = 16 byte chunks */
	vbox->guest_pool = gen_pool_create(4, -1);
	if (!vbox->guest_pool)
		goto err_unmap_guest_heap;

	ret = gen_pool_add_virt(vbox->guest_pool,
				(unsigned long)vbox->guest_heap,
				GUEST_HEAP_OFFSET(vbox),
				GUEST_HEAP_USABLE_SIZE, -1);
	if (ret)
		goto err_destroy_guest_pool;

	ret = hgsmi_test_query_conf(vbox->guest_pool);
	if (ret) {
		DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
		goto err_destroy_guest_pool;
	}

	/* Reduce available VRAM size to reflect the guest heap. */
	vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
	/* Linux drm represents monitors as a 32-bit array. */
	hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT,
			 &vbox->num_crtcs);
	vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS);

	if (!have_hgsmi_mode_hints(vbox)) {
		ret = -ENOTSUPP;
		goto err_destroy_guest_pool;
	}

	vbox->last_mode_hints = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
					     sizeof(struct vbva_modehint),
					     GFP_KERNEL);
	if (!vbox->last_mode_hints) {
		ret = -ENOMEM;
		goto err_destroy_guest_pool;
	}

	ret = vbox_accel_init(vbox);
	if (ret)
		goto err_destroy_guest_pool;

	return 0;

err_destroy_guest_pool:
	gen_pool_destroy(vbox->guest_pool);
err_unmap_guest_heap:
	pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
	return ret;
}

static void vbox_hw_fini(struct vbox_private *vbox)
{
	vbox_accel_fini(vbox);
	gen_pool_destroy(vbox->guest_pool);
	pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
}

int vbox_driver_load(struct drm_device *dev)
{
	struct vbox_private *vbox;
	int ret = 0;

	if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
		return -ENODEV;

	vbox = devm_kzalloc(dev->dev, sizeof(*vbox), GFP_KERNEL);
	if (!vbox)
		return -ENOMEM;

	dev->dev_private = vbox;
	vbox->dev = dev;

	mutex_init(&vbox->hw_mutex);

	ret = vbox_hw_init(vbox);
	if (ret)
		return ret;

	ret = vbox_mm_init(vbox);
	if (ret)
		goto err_hw_fini;

	drm_mode_config_init(dev);

	dev->mode_config.funcs = (void *)&vbox_mode_funcs;
	dev->mode_config.min_width = 64;
	dev->mode_config.min_height = 64;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
	dev->mode_config.max_height = VBE_DISPI_MAX_YRES;

	ret = vbox_mode_init(dev);
	if (ret)
		goto err_drm_mode_cleanup;

	ret = vbox_irq_init(vbox);
	if (ret)
		goto err_mode_fini;

	ret = vbox_fbdev_init(dev);
	if (ret)
		goto err_irq_fini;

	return 0;

err_irq_fini:
	vbox_irq_fini(vbox);
err_mode_fini:
	vbox_mode_fini(dev);
err_drm_mode_cleanup:
	drm_mode_config_cleanup(dev);
	vbox_mm_fini(vbox);
err_hw_fini:
	vbox_hw_fini(vbox);
	return ret;
}

void vbox_driver_unload(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;

	vbox_fbdev_fini(dev);
	vbox_irq_fini(vbox);
	vbox_mode_fini(dev);
	drm_mode_config_cleanup(dev);
	vbox_mm_fini(vbox);
	vbox_hw_fini(vbox);
}

/**
 * @note this is described in the DRM framework documentation.  AST does not
 * have it, but we get an oops on driver unload if it is not present.
 */
void vbox_driver_lastclose(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;

	if (vbox->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
}

int vbox_gem_create(struct drm_device *dev,
		    u32 size, bool iskernel, struct drm_gem_object **obj)
{
	struct vbox_bo *vboxbo;
	int ret;

	*obj = NULL;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("failed to allocate GEM object\n");
		return ret;
	}

	*obj = &vboxbo->gem;

	return 0;
}

int vbox_dumb_create(struct drm_file *file,
		     struct drm_device *dev, struct drm_mode_create_dumb *args)
{
	int ret;
	struct drm_gem_object *gobj;
	u32 handle;

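	/* Round bpp up to a whole number of bytes when computing the pitch. */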
	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = vbox_gem_create(dev, args->size, false, &gobj);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file, gobj, &handle);
	drm_gem_object_put_unlocked(gobj);
	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}

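/*
 * Drop our reference to the backing TTM buffer object.  ttm_bo_unref()
 * clears the pointer it is given, so the wrapping vbox_bo pointer is
 * cleared as well once the reference is gone.
 */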
static void vbox_bo_unref(struct vbox_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->bo);
	ttm_bo_unref(&tbo);
	if (!tbo)
		*bo = NULL;
}

void vbox_gem_free_object(struct drm_gem_object *obj)
{
	struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);

	vbox_bo_unref(&vbox_bo);
}

static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->bo.vma_node);
}

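/**
 * Look up the GEM object behind a dumb-buffer handle and report the fake
 * mmap offset of its buffer object to user space.
 */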
int
vbox_dumb_mmap_offset(struct drm_file *file,
		      struct drm_device *dev,
		      u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;
	struct vbox_bo *bo;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(file, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	bo = gem_to_vbox_bo(obj);
	*offset = vbox_bo_mmap_offset(bo);

	drm_gem_object_put(obj);
	ret = 0;

out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}