/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include "virtgpu_drv.h"

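/* Deferred dirty-rect flushes are throttled to roughly 60 per second. */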
#define VIRTIO_GPU_FBCON_POLL_PERIOD (HZ / 60)

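/*
 * Per-device fbdev emulation state: the fb helper core, the framebuffer
 * wrapping the console's backing object, and a delayed worker that
 * flushes accumulated dirty rectangles to the host.
 */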
struct virtio_gpu_fbdev {
	struct drm_fb_helper           helper;
	struct virtio_gpu_framebuffer  vgfb;
	struct virtio_gpu_device       *vgdev;
	struct delayed_work            work;
};

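/*
 * Merge the given rectangle into @fb's pending dirty region.  Unless the
 * update must be deferred (@store set, or we are in atomic context), the
 * merged region is transferred to the host resource and flushed to the
 * display right away; otherwise it is left for the delayed worker.
 */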
static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
				   bool store, int x, int y,
				   int width, int height)
{
	struct drm_device *dev = fb->base.dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	bool store_for_later = false;
	int bpp = fb->base.format->cpp[0];
	int x2, y2;
	unsigned long flags;
	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);

	if ((width <= 0) ||
	    (x + width > fb->base.width) ||
	    (y + height > fb->base.height)) {
		DRM_DEBUG("values out of range %dx%d+%d+%d, fb %dx%d\n",
			  width, height, x, y,
			  fb->base.width, fb->base.height);
		return -EINVAL;
	}

	/*
	 * Can be called from pretty much any context (e.g. the console
	 * output path).  If we are in atomic context, just store the
	 * dirty rect info and send out the update later.
	 *
	 * in_atomic() can't be tested while the spinlock is held.
	 */
	if (in_atomic() || store)
		store_for_later = true;

	x2 = x + width - 1;
	y2 = y + height - 1;

	spin_lock_irqsave(&fb->dirty_lock, flags);

	/* grow the rectangle to cover any previously stored dirty region */
	if (fb->y1 < y)
		y = fb->y1;
	if (fb->y2 > y2)
		y2 = fb->y2;
	if (fb->x1 < x)
		x = fb->x1;
	if (fb->x2 > x2)
		x2 = fb->x2;

	if (store_for_later) {
		fb->x1 = x;
		fb->x2 = x2;
		fb->y1 = y;
		fb->y2 = y2;
		spin_unlock_irqrestore(&fb->dirty_lock, flags);
		return 0;
	}

	/* we send the update now, so reset the stored region to empty */
	fb->x1 = fb->y1 = INT_MAX;
	fb->x2 = fb->y2 = 0;

	spin_unlock_irqrestore(&fb->dirty_lock, flags);

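	/* copy the merged rectangle from the guest BO to the host resource */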
	{
		uint32_t offset;
		uint32_t w = x2 - x + 1;
		uint32_t h = y2 - y + 1;

		offset = (y * fb->base.pitches[0]) + x * bpp;

		virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj->hw_res_handle,
						   offset,
						   cpu_to_le32(w),
						   cpu_to_le32(h),
						   cpu_to_le32(x),
						   cpu_to_le32(y),
						   NULL);
	}
	virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
				      x, y, x2 - x + 1, y2 - y + 1);
	return 0;
}

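/*
 * DIRTYFB handler: compute the bounding box of all clip rectangles (the
 * whole framebuffer if none were supplied).  Dumb buffers live in guest
 * memory and must be transferred to the host first, so they go through
 * virtio_gpu_dirty_update(); other resources only need a resource flush.
 */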
int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
			     struct drm_clip_rect *clips,
			     unsigned int num_clips)
{
	struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	struct drm_clip_rect norect;
	struct drm_clip_rect *clips_ptr;
	int left, right, top, bottom;
	int i;
	int inc = 1;

	/* without clip rects, dirty the whole framebuffer */
	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = vgfb->base.width;
		norect.y2 = vgfb->base.height;
	}
	left = clips->x1;
	right = clips->x2;
	top = clips->y1;
	bottom = clips->y2;

	/* bounds were seeded from the first clip rect, so skip it here */
	for (i = 1, clips_ptr = clips + inc;
	     i < num_clips; i++, clips_ptr += inc) {
		left = min_t(int, left, (int)clips_ptr->x1);
		right = max_t(int, right, (int)clips_ptr->x2);
		top = min_t(int, top, (int)clips_ptr->y1);
		bottom = max_t(int, bottom, (int)clips_ptr->y2);
	}

	if (obj->dumb)
		return virtio_gpu_dirty_update(vgfb, false, left, top,
					       right - left, bottom - top);

	virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
				      left, top, right - left, bottom - top);
	return 0;
}

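/* Deferred flush of the dirty rectangle stored by the drawing hooks below. */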
static void virtio_gpu_fb_dirty_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct virtio_gpu_fbdev *vfbdev =
		container_of(delayed_work, struct virtio_gpu_fbdev, work);
	struct virtio_gpu_framebuffer *vgfb = &vfbdev->vgfb;

	virtio_gpu_dirty_update(&vfbdev->vgfb, false, vgfb->x1, vgfb->y1,
				vgfb->x2 - vgfb->x1, vgfb->y2 - vgfb->y1);
}

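/*
 * fbcon drawing hooks: render with the generic sys_* helpers, record the
 * touched rectangle (store=true, since fbcon may call us from atomic
 * context), and schedule a deferred flush to the host.
 */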
static void virtio_gpu_3d_fillrect(struct fb_info *info,
				   const struct fb_fillrect *rect)
{
	struct virtio_gpu_fbdev *vfbdev = info->par;

	drm_fb_helper_sys_fillrect(info, rect);
	virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
				rect->width, rect->height);
	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

static void virtio_gpu_3d_copyarea(struct fb_info *info,
				   const struct fb_copyarea *area)
{
	struct virtio_gpu_fbdev *vfbdev = info->par;

	drm_fb_helper_sys_copyarea(info, area);
	virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
				area->width, area->height);
	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

static void virtio_gpu_3d_imageblit(struct fb_info *info,
				    const struct fb_image *image)
{
	struct virtio_gpu_fbdev *vfbdev = info->par;

	drm_fb_helper_sys_imageblit(info, image);
	virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
				image->width, image->height);
	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

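/* Default fb helper ops, with the dirty-tracking hooks above wired in. */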
static struct fb_ops virtio_gpufb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_fillrect = virtio_gpu_3d_fillrect,
	.fb_copyarea = virtio_gpu_3d_copyarea,
	.fb_imageblit = virtio_gpu_3d_imageblit,
};

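/* Map the object into the kernel so fbcon can draw into it directly. */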
static int virtio_gpu_vmap_fb(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	return virtio_gpu_object_kmap(obj, NULL);
}

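/*
 * fb_probe callback: allocate a guest buffer object for the console,
 * create and back a matching host resource, and register the fbdev.
 */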
static int virtio_gpufb_create(struct drm_fb_helper *helper,
			       struct drm_fb_helper_surface_size *sizes)
{
	struct virtio_gpu_fbdev *vfbdev =
		container_of(helper, struct virtio_gpu_fbdev, helper);
	struct drm_device *dev = helper->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct virtio_gpu_object *obj;
	uint32_t resid, format, size;
	int ret;

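	/* the console surface is always 32bpp XRGB8888 */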
	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * 4;
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(32, 24);

	format = virtio_gpu_translate_format(mode_cmd.pixel_format);
	if (format == 0)
		return -EINVAL;

	size = mode_cmd.pitches[0] * mode_cmd.height;
	obj = virtio_gpu_alloc_object(dev, size, false, true);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	virtio_gpu_resource_id_get(vgdev, &resid);
	virtio_gpu_cmd_create_resource(vgdev, resid, format,
				       mode_cmd.width, mode_cmd.height);

	ret = virtio_gpu_vmap_fb(vgdev, obj);
	if (ret) {
		DRM_ERROR("failed to vmap fb %d\n", ret);
		goto err_obj_vmap;
	}

	/* attach the object to the resource */
	ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
	if (ret)
		goto err_obj_attach;

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_fb_alloc;
	}

	info->par = helper;

	ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
					  &mode_cmd, &obj->gem_base);
	if (ret)
		goto err_fb_alloc;

	fb = &vfbdev->vgfb.base;

	vfbdev->helper.fb = fb;

	strcpy(info->fix.id, "virtiodrmfb");
	info->fbops = &virtio_gpufb_ops;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;

	info->screen_buffer = obj->vmap;
	info->screen_size = obj->gem_base.size;
	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
	drm_fb_helper_fill_var(info, &vfbdev->helper,
			       sizes->fb_width, sizes->fb_height);

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;
	return 0;

err_fb_alloc:
	virtio_gpu_cmd_resource_inval_backing(vgdev, resid);
err_obj_attach:
err_obj_vmap:
	virtio_gpu_gem_free_object(&obj->gem_base);
	return ret;
}

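/* Unregister the fbdev emulation and clean up the helper and framebuffer. */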
static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
				    struct virtio_gpu_fbdev *vgfbdev)
{
	struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;

	drm_fb_helper_unregister_fbi(&vgfbdev->helper);

	if (vgfb->base.obj[0])
		vgfb->base.obj[0] = NULL;
	drm_fb_helper_fini(&vgfbdev->helper);
	drm_framebuffer_cleanup(&vgfb->base);

	return 0;
}

static const struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
	.fb_probe = virtio_gpufb_create,
};

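/*
 * Set up fbdev emulation for the device: allocate the fbdev state, wire
 * up the dirty worker, and let the fb helper pick an initial config.
 */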
int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_fbdev *vgfbdev;
	int bpp_sel = 32; /* TODO: parameter from somewhere? */
	int ret;

	vgfbdev = kzalloc(sizeof(struct virtio_gpu_fbdev), GFP_KERNEL);
	if (!vgfbdev)
		return -ENOMEM;

	vgfbdev->vgdev = vgdev;
	vgdev->vgfbdev = vgfbdev;
	INIT_DELAYED_WORK(&vgfbdev->work, virtio_gpu_fb_dirty_work);

	drm_fb_helper_prepare(vgdev->ddev, &vgfbdev->helper,
			      &virtio_gpu_fb_helper_funcs);
	ret = drm_fb_helper_init(vgdev->ddev, &vgfbdev->helper,
				 VIRTIO_GPUFB_CONN_LIMIT);
	if (ret) {
		kfree(vgfbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&vgfbdev->helper);
	drm_fb_helper_initial_config(&vgfbdev->helper, bpp_sel);
	return 0;
}

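/* Tear down fbdev emulation on driver unload. */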
void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev)
{
	if (!vgdev->vgfbdev)
		return;

	virtio_gpu_fbdev_destroy(vgdev->ddev, vgdev->vgfbdev);
	kfree(vgdev->vgfbdev);
	vgdev->vgfbdev = NULL;
}