/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/export.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

#include <drm/ttm/ttm_placement.h>

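/* Coalesce dirty-region flushes to at most roughly 30 updates per second. */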
#define VMW_DIRTY_DELAY (HZ / 30)

struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct mutex bo_mutex;
	struct vmw_buffer_object *vmw_bo;
	unsigned bo_size;
	struct drm_framebuffer *set_fb;
	struct drm_display_mode *set_mode;
	u32 fb_x;
	u32 fb_y;
	bool bo_iowrite;

	u32 pseudo_palette[17];

	unsigned max_width;
	unsigned max_height;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;

	struct drm_crtc *crtc;
	struct drm_connector *con;
	struct delayed_work local_work;
};

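/*
 * fbdev palette hook. Only the 16 pseudo-palette entries used by
 * truecolor visuals are supported.
 */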
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->set_fb->format->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue  & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n",
			  par->set_fb->format->depth,
			  par->set_fb->format->cpp[0] * 8);
		return 1;
	}

	return 0;
}

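/*
 * Validate a mode requested through the fbdev interface: only 32 bpp
 * (depth 24 or 32) is accepted, and the requested geometry must fit
 * both the fbdev framebuffer and the available VRAM.
 */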
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					var->xres * var->bits_per_pixel / 8,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geometry cannot fit in VRAM\n");
		return -EINVAL;
	}

	return 0;
}

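/* Blanking is a no-op; the request is simply accepted. */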
static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/**
 * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
 *
 * @work: The struct work_struct associated with this task.
 *
 * This function flushes the dirty regions of the vmalloc framebuffer to the
 * kms framebuffer, and if the kms framebuffer is visible, also updates the
 * corresponding displays. Note that this function runs even if the kms
 * framebuffer is not bound to a crtc and thus not visible, but it's turned
 * off during hibernation using the par->dirty.active bool.
 */
static void vmw_fb_dirty_flush(struct work_struct *work)
{
	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
					      local_work.work);
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	unsigned long irq_flags;
	s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
	u32 cpp, max_x, max_y;
	struct drm_clip_rect clip;
	struct drm_framebuffer *cur_fb;
	u8 *src_ptr, *dst_ptr;
	struct vmw_buffer_object *vbo = par->vmw_bo;
	void *virtual;

	if (!READ_ONCE(par->dirty.active))
		return;

	mutex_lock(&par->bo_mutex);
	cur_fb = par->set_fb;
	if (!cur_fb)
		goto out_unlock;

	(void) ttm_read_lock(&vmw_priv->reservation_sem, false);
	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
	virtual = vmw_bo_map_and_cache(vbo);
	if (!virtual)
		goto out_unreserve;

	spin_lock_irqsave(&par->dirty.lock, irq_flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
		goto out_unreserve;
	}

	/*
	 * Handle panning when copying from vmalloc to framebuffer.
	 * Clip dirty area to framebuffer.
	 */
	cpp = cur_fb->format->cpp[0];
	max_x = par->fb_x + cur_fb->width;
	max_y = par->fb_y + cur_fb->height;

	dst_x1 = par->dirty.x1 - par->fb_x;
	dst_y1 = par->dirty.y1 - par->fb_y;
	dst_x1 = max_t(s32, dst_x1, 0);
	dst_y1 = max_t(s32, dst_y1, 0);

	dst_x2 = par->dirty.x2 - par->fb_x;
	dst_y2 = par->dirty.y2 - par->fb_y;
	dst_x2 = min_t(s32, dst_x2, max_x);
	dst_y2 = min_t(s32, dst_y2, max_y);
	w = dst_x2 - dst_x1;
	h = dst_y2 - dst_y1;
	w = max_t(s32, 0, w);
	h = max_t(s32, 0, h);

	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

	if (w && h) {
		dst_ptr = (u8 *)virtual +
			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
		src_ptr = (u8 *)par->vmalloc +
			((dst_y1 + par->fb_y) * info->fix.line_length +
			 (dst_x1 + par->fb_x) * cpp);

		while (h-- > 0) {
			memcpy(dst_ptr, src_ptr, w * cpp);
			dst_ptr += par->set_fb->pitches[0];
			src_ptr += info->fix.line_length;
		}

		clip.x1 = dst_x1;
		clip.x2 = dst_x2;
		clip.y1 = dst_y1;
		clip.y2 = dst_y2;
	}

out_unreserve:
	ttm_bo_unreserve(&vbo->base);
	ttm_read_unlock(&vmw_priv->reservation_sem);
	if (w && h) {
		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
						       &clip, 1));
		vmw_fifo_flush(vmw_priv, false);
	}
out_unlock:
	mutex_unlock(&par->bo_mutex);
}

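/*
 * Record a dirty rectangle. An empty pending region (x1 == x2) means
 * nothing is queued: the rectangle is adopted wholesale and, if dirty
 * tracking is active, a delayed flush is scheduled. Otherwise the new
 * rectangle is merged into the pending bounding box.
 */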
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/*
		 * If we are active, start the dirty work. We share the
		 * work with the defio system.
		 */
		if (par->dirty.active)
			schedule_delayed_work(&par->local_work,
					      VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

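/*
 * fbdev panning hook: remember the new pan offset and mark the whole
 * visible framebuffer dirty so the next flush repaints it.
 */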
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;

	if ((var->xoffset + var->xres) > var->xres_virtual ||
	    (var->yoffset + var->yres) > var->yres_virtual) {
		DRM_ERROR("Requested panning cannot fit in framebuffer\n");
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;
	if (par->set_fb)
		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
				  par->set_fb->height);
	mutex_unlock(&par->bo_mutex);

	return 0;
}

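/*
 * Deferred-I/O callback: translate the list of touched pages into a
 * full-width dirty band covering the affected scanlines, then flush
 * immediately since defio has already delayed the write-back.
 */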
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);

		/*
		 * Since we've already waited on this work once, try to
		 * execute asap.
		 */
		cancel_delayed_work(&par->local_work);
		schedule_delayed_work(&par->local_work, 0);
	}
}

static struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};

/*
 * Draw code
 */

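/*
 * The drawing hooks delegate to the generic cfb_* helpers operating on
 * the shadow buffer, then mark the touched rectangle dirty for the next
 * flush.
 */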
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

/*
 * Bring up code
 */

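/*
 * Allocate a system-memory buffer object to back the fbdev framebuffer.
 * On vmw_bo_init() failure the buffer is already freed by the init
 * function, so only the reservation semaphore needs unwinding.
 */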
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_buffer_object **out)
{
	struct vmw_buffer_object *vmw_bo;
	int ret;

	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_bo_init(vmw_priv, vmw_bo, size,
			  &vmw_sys_placement,
			  false,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;
	ttm_write_unlock(&vmw_priv->reservation_sem);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->reservation_sem);
	return ret;
}

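/*
 * Derive the color depth from bpp: 32 bpp with an alpha channel is
 * depth 32, otherwise depth 24. Only 32 bpp is supported.
 */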
static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
				int *depth)
{
	switch (var->bits_per_pixel) {
	case 32:
		*depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	return 0;
}

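/*
 * Apply a mode set while handling modeset-lock contention: on -EDEADLK,
 * back off and retry until the full acquire context can be taken.
 */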
static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
	struct drm_crtc *crtc = set->crtc;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

restart:
	ret = crtc->funcs->set_config(set, &ctx);

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto restart;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

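/*
 * Tear down the fbdev KMS state: unset the mode if one is active, drop
 * the framebuffer reference, and optionally release the backing buffer
 * object.
 */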
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
			     bool detach_bo,
			     bool unref_bo)
{
	struct drm_framebuffer *cur_fb = par->set_fb;
	int ret;

	/* Detach the KMS framebuffer from crtcs */
	if (par->set_mode) {
		struct drm_mode_set set;

		set.crtc = par->crtc;
		set.x = 0;
		set.y = 0;
		set.mode = NULL;
		set.fb = NULL;
		set.num_connectors = 0;
		set.connectors = &par->con;
		ret = vmwgfx_set_config_internal(&set);
		if (ret) {
			DRM_ERROR("Could not unset a mode.\n");
			return ret;
		}
		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
		par->set_mode = NULL;
	}

	if (cur_fb) {
		drm_framebuffer_put(cur_fb);
		par->set_fb = NULL;
	}

	if (par->vmw_bo && detach_bo && unref_bo)
		vmw_bo_unreference(&par->vmw_bo);

	return 0;
}

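/*
 * (Re)create the KMS framebuffer for the current fbdev mode. If the
 * existing framebuffer already matches, keep it; otherwise detach it,
 * reallocating the backing buffer object only when it is too small or
 * wastefully large (more than twice the required size).
 */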
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
	struct drm_mode_fb_cmd2 mode_cmd;
	struct vmw_fb_par *par = info->par;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_framebuffer *cur_fb;
	struct vmw_framebuffer *vfb;
	int ret = 0, depth;
	size_t new_bo_size;

	ret = vmw_fb_compute_depth(var, &depth);
	if (ret)
		return ret;

	mode_cmd.width = var->xres;
	mode_cmd.height = var->yres;
	mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
	mode_cmd.pixel_format =
		drm_mode_legacy_fb_format(var->bits_per_pixel, depth);

	cur_fb = par->set_fb;
	if (cur_fb && cur_fb->width == mode_cmd.width &&
	    cur_fb->height == mode_cmd.height &&
	    cur_fb->format->format == mode_cmd.pixel_format &&
	    cur_fb->pitches[0] == mode_cmd.pitches[0])
		return 0;

	/* Do we need a new buffer object? */
	new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
	ret = vmw_fb_kms_detach(par,
				par->bo_size < new_bo_size ||
				par->bo_size > 2*new_bo_size,
				true);
	if (ret)
		return ret;

	if (!par->vmw_bo) {
		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
				       &par->vmw_bo);
		if (ret) {
			DRM_ERROR("Failed creating a buffer object for "
				  "fbdev.\n");
			return ret;
		}
		par->bo_size = new_bo_size;
	}

	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
				      true, &mode_cmd);
	if (IS_ERR(vfb))
		return PTR_ERR(vfb);

	par->set_fb = &vfb->base;

	return 0;
}

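/*
 * fbdev set_par hook: build a display mode for the requested resolution,
 * validate it against VRAM, swap in a matching KMS framebuffer, apply
 * the configuration, and kick an immediate dirty flush.
 */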
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct drm_mode_set set;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
		DRM_MODE_TYPE_DRIVER,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	struct drm_display_mode *old_mode;
	struct drm_display_mode *mode;
	int ret;

	old_mode = par->set_mode;
	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
	if (!mode) {
		DRM_ERROR("Could not create new fb mode.\n");
		return -ENOMEM;
	}

	mode->hdisplay = var->xres;
	mode->vdisplay = var->yres;
	vmw_guess_mode_timing(mode);

	if (old_mode && drm_mode_equal(old_mode, mode)) {
		drm_mode_destroy(vmw_priv->dev, mode);
		mode = old_mode;
		old_mode = NULL;
	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
					mode->hdisplay *
					DIV_ROUND_UP(var->bits_per_pixel, 8),
					mode->vdisplay)) {
		drm_mode_destroy(vmw_priv->dev, mode);
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	ret = vmw_fb_kms_framebuffer(info);
	if (ret)
		goto out_unlock;

	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;

	set.crtc = par->crtc;
	set.x = 0;
	set.y = 0;
	set.mode = mode;
	set.fb = par->set_fb;
	set.num_connectors = 1;
	set.connectors = &par->con;

	ret = vmwgfx_set_config_internal(&set);
	if (ret)
		goto out_unlock;

	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
			  par->set_fb->width, par->set_fb->height);

	/*
	 * If something was already dirty, dirty_mark() won't schedule
	 * new work, so schedule it here unconditionally.
	 */
	schedule_delayed_work(&par->local_work, 0);

out_unlock:
	if (old_mode)
		drm_mode_destroy(vmw_priv->dev, old_mode);
	par->set_mode = mode;

	mutex_unlock(&par->bo_mutex);

	return ret;
}


static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

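/*
 * Create and register the fbdev emulation for this device: allocate the
 * vmalloc shadow buffer, set up fixed and variable screen info, hook up
 * deferred I/O, and apply an initial mode.
 */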
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	struct drm_display_mode *init_mode;
	int ret;

	fb_bpp = 32;
	fb_depth = 24;

	/* Clamp the initial framebuffer size to at most 2048x2048. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	memset(par, 0, sizeof(*par));
	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
	par->vmw_priv = vmw_priv;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
				      par->max_height, &par->con,
				      &par->crtc, &init_mode);
	if (ret)
		goto err_kms;

	info->var.xres = init_mode->hdisplay;
	info->var.yres = init_mode->vdisplay;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vzalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = (char __iomem *)par->vmalloc;
	info->screen_size = fb_size;

	info->fbops = &vmw_fb_ops;

	/* Depth 24 by default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = fb_bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	mutex_init(&par->bo_mutex);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	vmw_fb_set_par(info);

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
err_free:
	vfree(par->vmalloc);
err_kms:
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

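/*
 * Unregister the fbdev emulation and free all associated resources.
 */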
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;

	/* Stop deferred I/O and the dirty worker before unregistering. */
	fb_deferred_io_cleanup(info);
	cancel_delayed_work_sync(&par->local_work);
	unregister_framebuffer(info);

	mutex_lock(&par->bo_mutex);
	(void) vmw_fb_kms_detach(par, true, true);
	mutex_unlock(&par->bo_mutex);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

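/*
 * Disable dirty flushing and wait for any in-flight deferred-I/O and
 * dirty work to finish, e.g. ahead of hibernation.
 */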
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work(&info->deferred_work);
	flush_delayed_work(&par->local_work);

	return 0;
}

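/*
 * Re-enable dirty flushing after vmw_fb_off().
 */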
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	/*
	 * Need to reschedule a dirty update, because otherwise that's
	 * only done in dirty_mark() if the previous coalesced
	 * dirty region was empty.
	 */
	schedule_delayed_work(&par->local_work, 0);

	return 0;
}