1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2  */
3 /*
4  *
5  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the
17  * next paragraph) shall be included in all copies or substantial portions
18  * of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27  *
28  */
29 
30 #include <linux/acpi.h>
31 #include <linux/device.h>
32 #include <linux/oom.h>
33 #include <linux/module.h>
34 #include <linux/pci.h>
35 #include <linux/pm.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/pnp.h>
38 #include <linux/slab.h>
39 #include <linux/vgaarb.h>
40 #include <linux/vga_switcheroo.h>
41 #include <linux/vt.h>
42 #include <acpi/video.h>
43 
44 #include <drm/drm_atomic_helper.h>
45 #include <drm/drm_ioctl.h>
46 #include <drm/drm_irq.h>
47 #include <drm/drm_probe_helper.h>
48 #include <drm/i915_drm.h>
49 
50 #include "display/intel_acpi.h"
51 #include "display/intel_audio.h"
52 #include "display/intel_bw.h"
53 #include "display/intel_cdclk.h"
54 #include "display/intel_display_types.h"
55 #include "display/intel_dp.h"
56 #include "display/intel_fbdev.h"
57 #include "display/intel_gmbus.h"
58 #include "display/intel_hotplug.h"
59 #include "display/intel_overlay.h"
60 #include "display/intel_pipe_crc.h"
61 #include "display/intel_sprite.h"
62 
63 #include "gem/i915_gem_context.h"
64 #include "gem/i915_gem_ioctls.h"
65 #include "gt/intel_gt.h"
66 #include "gt/intel_gt_pm.h"
67 
68 #include "i915_debugfs.h"
69 #include "i915_drv.h"
70 #include "i915_irq.h"
71 #include "i915_memcpy.h"
72 #include "i915_perf.h"
73 #include "i915_query.h"
74 #include "i915_suspend.h"
75 #include "i915_sysfs.h"
76 #include "i915_trace.h"
77 #include "i915_vgpu.h"
78 #include "intel_csr.h"
79 #include "intel_pm.h"
80 
81 static struct drm_driver driver;
82 
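/*
 * Register save area used around Valleyview S0ix transitions; allocated on
 * demand by vlv_alloc_s0ix_state() below.
 */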
83 struct vlv_s0ix_state {
84 	/* GAM */
85 	u32 wr_watermark;
86 	u32 gfx_prio_ctrl;
87 	u32 arb_mode;
88 	u32 gfx_pend_tlb0;
89 	u32 gfx_pend_tlb1;
90 	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
91 	u32 media_max_req_count;
92 	u32 gfx_max_req_count;
93 	u32 render_hwsp;
94 	u32 ecochk;
95 	u32 bsd_hwsp;
96 	u32 blt_hwsp;
97 	u32 tlb_rd_addr;
98 
99 	/* MBC */
100 	u32 g3dctl;
101 	u32 gsckgctl;
102 	u32 mbctl;
103 
104 	/* GCP */
105 	u32 ucgctl1;
106 	u32 ucgctl3;
107 	u32 rcgctl1;
108 	u32 rcgctl2;
109 	u32 rstctl;
110 	u32 misccpctl;
111 
112 	/* GPM */
113 	u32 gfxpause;
114 	u32 rpdeuhwtc;
115 	u32 rpdeuc;
116 	u32 ecobus;
117 	u32 pwrdwnupctl;
118 	u32 rp_down_timeout;
119 	u32 rp_deucsw;
120 	u32 rcubmabdtmr;
121 	u32 rcedata;
122 	u32 spare2gh;
123 
124 	/* Display 1 CZ domain */
125 	u32 gt_imr;
126 	u32 gt_ier;
127 	u32 pm_imr;
128 	u32 pm_ier;
129 	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
130 
131 	/* GT SA CZ domain */
132 	u32 tilectl;
133 	u32 gt_fifoctl;
134 	u32 gtlc_wake_ctrl;
135 	u32 gtlc_survive;
136 	u32 pmwgicz;
137 
138 	/* Display 2 CZ domain */
139 	u32 gu_ctl0;
140 	u32 gu_ctl1;
141 	u32 pcbr;
142 	u32 clock_gate_dis2;
143 };
144 
145 static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
146 {
147 	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);
148 
149 	dev_priv->bridge_dev =
150 		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
151 	if (!dev_priv->bridge_dev) {
152 		DRM_ERROR("bridge device not found\n");
153 		return -1;
154 	}
155 	return 0;
156 }
157 
158 /* Allocate space for the MCH regs if needed, return nonzero on error */
159 static int
160 intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
161 {
162 	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
163 	u32 temp_lo, temp_hi = 0;
164 	u64 mchbar_addr;
165 	int ret;
166 
167 	if (INTEL_GEN(dev_priv) >= 4)
168 		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
169 	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
170 	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
171 
172 	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
173 #ifdef CONFIG_PNP
174 	if (mchbar_addr &&
175 	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
176 		return 0;
177 #endif
178 
179 	/* Get some space for it */
180 	dev_priv->mch_res.name = "i915 MCHBAR";
181 	dev_priv->mch_res.flags = IORESOURCE_MEM;
182 	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
183 				     &dev_priv->mch_res,
184 				     MCHBAR_SIZE, MCHBAR_SIZE,
185 				     PCIBIOS_MIN_MEM,
186 				     0, pcibios_align_resource,
187 				     dev_priv->bridge_dev);
188 	if (ret) {
189 		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
190 		dev_priv->mch_res.start = 0;
191 		return ret;
192 	}
193 
194 	if (INTEL_GEN(dev_priv) >= 4)
195 		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
196 				       upper_32_bits(dev_priv->mch_res.start));
197 
198 	pci_write_config_dword(dev_priv->bridge_dev, reg,
199 			       lower_32_bits(dev_priv->mch_res.start));
200 	return 0;
201 }
202 
203 /* Setup MCHBAR if possible; flag mchbar_need_disable if we enabled it ourselves and must disable it again on teardown */
204 static void
205 intel_setup_mchbar(struct drm_i915_private *dev_priv)
206 {
207 	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
208 	u32 temp;
209 	bool enabled;
210 
211 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
212 		return;
213 
214 	dev_priv->mchbar_need_disable = false;
215 
216 	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
217 		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
218 		enabled = !!(temp & DEVEN_MCHBAR_EN);
219 	} else {
220 		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
221 		enabled = temp & 1;
222 	}
223 
224 	/* If it's already enabled, don't have to do anything */
225 	if (enabled)
226 		return;
227 
228 	if (intel_alloc_mchbar_resource(dev_priv))
229 		return;
230 
231 	dev_priv->mchbar_need_disable = true;
232 
233 	/* Space is allocated or reserved, so enable it. */
234 	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
235 		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
236 				       temp | DEVEN_MCHBAR_EN);
237 	} else {
238 		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
239 		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
240 	}
241 }
242 
243 static void
244 intel_teardown_mchbar(struct drm_i915_private *dev_priv)
245 {
246 	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
247 
248 	if (dev_priv->mchbar_need_disable) {
249 		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
250 			u32 deven_val;
251 
252 			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
253 					      &deven_val);
254 			deven_val &= ~DEVEN_MCHBAR_EN;
255 			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
256 					       deven_val);
257 		} else {
258 			u32 mchbar_val;
259 
260 			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
261 					      &mchbar_val);
262 			mchbar_val &= ~1;
263 			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
264 					       mchbar_val);
265 		}
266 	}
267 
268 	if (dev_priv->mch_res.start)
269 		release_resource(&dev_priv->mch_res);
270 }
271 
272 /* true = enable decode, false = disable decode */
273 static unsigned int i915_vga_set_decode(void *cookie, bool state)
274 {
275 	struct drm_i915_private *dev_priv = cookie;
276 
277 	intel_modeset_vga_set_state(dev_priv, state);
278 	if (state)
279 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
280 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
281 	else
282 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
283 }
284 
285 static int i915_resume_switcheroo(struct drm_i915_private *i915);
286 static int i915_suspend_switcheroo(struct drm_i915_private *i915,
287 				   pm_message_t state);
288 
289 static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
290 {
291 	struct drm_i915_private *i915 = pdev_to_i915(pdev);
292 	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
293 
294 	if (!i915) {
295 		dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
296 		return;
297 	}
298 
299 	if (state == VGA_SWITCHEROO_ON) {
300 		pr_info("switched on\n");
301 		i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
302 		/* i915 resume handler doesn't set to D0 */
303 		pci_set_power_state(pdev, PCI_D0);
304 		i915_resume_switcheroo(i915);
305 		i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
306 	} else {
307 		pr_info("switched off\n");
308 		i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
309 		i915_suspend_switcheroo(i915, pmm);
310 		i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
311 	}
312 }
313 
314 static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
315 {
316 	struct drm_i915_private *i915 = pdev_to_i915(pdev);
317 
318 	/*
319 	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
320 	 * locking inversion with the driver load path. And the access here is
321 	 * completely racy anyway. So don't bother with locking for now.
322 	 */
323 	return i915 && i915->drm.open_count == 0;
324 }
325 
326 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
327 	.set_gpu_state = i915_switcheroo_set_state,
328 	.reprobe = NULL,
329 	.can_switch = i915_switcheroo_can_switch,
330 };
331 
332 static int i915_driver_modeset_probe(struct drm_device *dev)
333 {
334 	struct drm_i915_private *dev_priv = to_i915(dev);
335 	struct pci_dev *pdev = dev_priv->drm.pdev;
336 	int ret;
337 
338 	if (i915_inject_probe_failure(dev_priv))
339 		return -ENODEV;
340 
341 	if (HAS_DISPLAY(dev_priv)) {
342 		ret = drm_vblank_init(&dev_priv->drm,
343 				      INTEL_INFO(dev_priv)->num_pipes);
344 		if (ret)
345 			goto out;
346 	}
347 
348 	intel_bios_init(dev_priv);
349 
350 	/* If we have more than one VGA card, then we need to arbitrate access
351 	 * to the common VGA resources.
352 	 *
353 	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
354 	 * then we do not take part in VGA arbitration and the
355 	 * vga_client_register() fails with -ENODEV.
356 	 */
357 	ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
358 	if (ret && ret != -ENODEV)
359 		goto out;
360 
361 	intel_register_dsm_handler();
362 
363 	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
364 	if (ret)
365 		goto cleanup_vga_client;
366 
367 	intel_power_domains_init_hw(dev_priv, false);
368 
369 	intel_csr_ucode_init(dev_priv);
370 
371 	ret = intel_irq_install(dev_priv);
372 	if (ret)
373 		goto cleanup_csr;
374 
375 	intel_gmbus_setup(dev_priv);
376 
377 	/* Important: The output setup functions called by modeset_init need
378 	 * working irqs for e.g. gmbus and dp aux transfers. */
379 	ret = intel_modeset_init(dev);
380 	if (ret)
381 		goto cleanup_irq;
382 
383 	ret = i915_gem_init(dev_priv);
384 	if (ret)
385 		goto cleanup_modeset;
386 
387 	intel_overlay_setup(dev_priv);
388 
389 	if (!HAS_DISPLAY(dev_priv))
390 		return 0;
391 
392 	ret = intel_fbdev_init(dev);
393 	if (ret)
394 		goto cleanup_gem;
395 
396 	/* Only enable hotplug handling once the fbdev is fully set up. */
397 	intel_hpd_init(dev_priv);
398 
399 	intel_init_ipc(dev_priv);
400 
401 	return 0;
402 
403 cleanup_gem:
404 	i915_gem_suspend(dev_priv);
405 	i915_gem_driver_remove(dev_priv);
406 	i915_gem_driver_release(dev_priv);
407 cleanup_modeset:
408 	intel_modeset_driver_remove(dev);
409 cleanup_irq:
410 	intel_irq_uninstall(dev_priv);
411 	intel_gmbus_teardown(dev_priv);
412 cleanup_csr:
413 	intel_csr_ucode_fini(dev_priv);
414 	intel_power_domains_driver_remove(dev_priv);
415 	vga_switcheroo_unregister_client(pdev);
416 cleanup_vga_client:
417 	vga_client_register(pdev, NULL, NULL, NULL);
418 out:
419 	return ret;
420 }
421 
422 static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
423 {
424 	struct apertures_struct *ap;
425 	struct pci_dev *pdev = dev_priv->drm.pdev;
426 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
427 	bool primary;
428 	int ret;
429 
430 	ap = alloc_apertures(1);
431 	if (!ap)
432 		return -ENOMEM;
433 
434 	ap->ranges[0].base = ggtt->gmadr.start;
435 	ap->ranges[0].size = ggtt->mappable_end;
436 
437 	primary =
438 		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
439 
440 	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
441 
442 	kfree(ap);
443 
444 	return ret;
445 }
446 
447 static void intel_init_dpio(struct drm_i915_private *dev_priv)
448 {
449 	/*
450 	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
451 	 * CHV x1 PHY (DP/HDMI D)
452 	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
453 	 */
454 	if (IS_CHERRYVIEW(dev_priv)) {
455 		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
456 		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
457 	} else if (IS_VALLEYVIEW(dev_priv)) {
458 		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
459 	}
460 }
461 
462 static int i915_workqueues_init(struct drm_i915_private *dev_priv)
463 {
464 	/*
465 	 * The i915 workqueue is primarily used for batched retirement of
466 	 * requests (and thus managing bo) once the task has been completed
467 	 * by the GPU. i915_retire_requests() is called directly when we
468 	 * need high-priority retirement, such as waiting for an explicit
469 	 * bo.
470 	 *
471 	 * It is also used for periodic low-priority events, such as
472 	 * idle-timers and recording error state.
473 	 *
474 	 * All tasks on the workqueue are expected to acquire the dev mutex
475 	 * so there is no point in running more than one instance of the
476 	 * workqueue at any time.  Use an ordered one.
477 	 */
478 	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
479 	if (dev_priv->wq == NULL)
480 		goto out_err;
481 
482 	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
483 	if (dev_priv->hotplug.dp_wq == NULL)
484 		goto out_free_wq;
485 
486 	return 0;
487 
488 out_free_wq:
489 	destroy_workqueue(dev_priv->wq);
490 out_err:
491 	DRM_ERROR("Failed to allocate workqueues.\n");
492 
493 	return -ENOMEM;
494 }
495 
496 static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
497 {
498 	destroy_workqueue(dev_priv->hotplug.dp_wq);
499 	destroy_workqueue(dev_priv->wq);
500 }
501 
502 /*
503  * We don't keep the workarounds for pre-production hardware, so we expect our
504  * driver to fail on these machines in one way or another. A little warning on
505  * dmesg may help both the user and the bug triagers.
506  *
507  * Our policy for removing pre-production workarounds is to keep the
508  * current gen workarounds as a guide to the bring-up of the next gen
509  * (workarounds have a habit of persisting!). Anything older than that
510  * should be removed along with the complications they introduce.
511  */
512 static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
513 {
514 	bool pre = false;
515 
516 	pre |= IS_HSW_EARLY_SDV(dev_priv);
517 	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
518 	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
519 	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
520 
521 	if (pre) {
522 		DRM_ERROR("This is a pre-production stepping. "
523 			  "It may not be fully functional.\n");
524 		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
525 	}
526 }
527 
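/* The s0ix register save area is only needed on Valleyview; skip it elsewhere. */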
528 static int vlv_alloc_s0ix_state(struct drm_i915_private *i915)
529 {
530 	if (!IS_VALLEYVIEW(i915))
531 		return 0;
532 
533 	/* we write all the values in the struct, so no need to zero it out */
534 	i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
535 				       GFP_KERNEL);
536 	if (!i915->vlv_s0ix_state)
537 		return -ENOMEM;
538 
539 	return 0;
540 }
541 
542 static void vlv_free_s0ix_state(struct drm_i915_private *i915)
543 {
544 	if (!i915->vlv_s0ix_state)
545 		return;
546 
547 	kfree(i915->vlv_s0ix_state);
548 	i915->vlv_s0ix_state = NULL;
549 }
550 
551 /**
552  * i915_driver_early_probe - setup state not requiring device access
553  * @dev_priv: device private
554  *
555  * Initialize everything that is a "SW-only" state, that is state not
556  * requiring accessing the device or exposing the driver via kernel internal
557  * or userspace interfaces. Example steps belonging here: lock initialization,
558  * system memory allocation, setting up device specific attributes and
559  * function hooks not requiring accessing the device.
560  */
561 static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
562 {
563 	int ret = 0;
564 
565 	if (i915_inject_probe_failure(dev_priv))
566 		return -ENODEV;
567 
568 	intel_device_info_subplatform_init(dev_priv);
569 
570 	intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
571 	intel_uncore_init_early(&dev_priv->uncore, dev_priv);
572 
573 	spin_lock_init(&dev_priv->irq_lock);
574 	spin_lock_init(&dev_priv->gpu_error.lock);
575 	mutex_init(&dev_priv->backlight_lock);
576 
577 	mutex_init(&dev_priv->sb_lock);
578 	pm_qos_add_request(&dev_priv->sb_qos,
579 			   PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
580 
581 	mutex_init(&dev_priv->av_mutex);
582 	mutex_init(&dev_priv->wm.wm_mutex);
583 	mutex_init(&dev_priv->pps_mutex);
584 	mutex_init(&dev_priv->hdcp_comp_mutex);
585 
586 	i915_memcpy_init_early(dev_priv);
587 	intel_runtime_pm_init_early(&dev_priv->runtime_pm);
588 
589 	ret = i915_workqueues_init(dev_priv);
590 	if (ret < 0)
591 		return ret;
592 
593 	ret = vlv_alloc_s0ix_state(dev_priv);
594 	if (ret < 0)
595 		goto err_workqueues;
596 
597 	intel_wopcm_init_early(&dev_priv->wopcm);
598 
599 	intel_gt_init_early(&dev_priv->gt, dev_priv);
600 
601 	ret = i915_gem_init_early(dev_priv);
602 	if (ret < 0)
603 		goto err_gt;
604 
605 	/* This must be called before any calls to HAS_PCH_* */
606 	intel_detect_pch(dev_priv);
607 
608 	intel_pm_setup(dev_priv);
609 	intel_init_dpio(dev_priv);
610 	ret = intel_power_domains_init(dev_priv);
611 	if (ret < 0)
612 		goto err_gem;
613 	intel_irq_init(dev_priv);
614 	intel_init_display_hooks(dev_priv);
615 	intel_init_clock_gating_hooks(dev_priv);
616 	intel_init_audio_hooks(dev_priv);
617 	intel_display_crc_init(dev_priv);
618 
619 	intel_detect_preproduction_hw(dev_priv);
620 
621 	return 0;
622 
623 err_gem:
624 	i915_gem_cleanup_early(dev_priv);
625 err_gt:
626 	intel_gt_driver_late_release(&dev_priv->gt);
627 	vlv_free_s0ix_state(dev_priv);
628 err_workqueues:
629 	i915_workqueues_cleanup(dev_priv);
630 	return ret;
631 }
632 
633 /**
634  * i915_driver_late_release - cleanup the setup done in
635  *			       i915_driver_early_probe()
636  * @dev_priv: device private
637  */
638 static void i915_driver_late_release(struct drm_i915_private *dev_priv)
639 {
640 	intel_irq_fini(dev_priv);
641 	intel_power_domains_cleanup(dev_priv);
642 	i915_gem_cleanup_early(dev_priv);
643 	intel_gt_driver_late_release(&dev_priv->gt);
644 	vlv_free_s0ix_state(dev_priv);
645 	i915_workqueues_cleanup(dev_priv);
646 
647 	pm_qos_remove_request(&dev_priv->sb_qos);
648 	mutex_destroy(&dev_priv->sb_lock);
649 }
650 
651 /**
652  * i915_driver_mmio_probe - setup device MMIO
653  * @dev_priv: device private
654  *
655  * Setup minimal device state necessary for MMIO accesses later in the
656  * initialization sequence. The setup here should avoid any other device-wide
657  * side effects or exposing the driver via kernel internal or user space
658  * interfaces.
659  */
660 static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
661 {
662 	int ret;
663 
664 	if (i915_inject_probe_failure(dev_priv))
665 		return -ENODEV;
666 
667 	if (i915_get_bridge_dev(dev_priv))
668 		return -EIO;
669 
670 	ret = intel_uncore_init_mmio(&dev_priv->uncore);
671 	if (ret < 0)
672 		goto err_bridge;
673 
674 	/* Try to make sure MCHBAR is enabled before poking at it */
675 	intel_setup_mchbar(dev_priv);
676 
677 	intel_device_info_init_mmio(dev_priv);
678 
679 	intel_uncore_prune_mmio_domains(&dev_priv->uncore);
680 
681 	intel_uc_init_mmio(&dev_priv->gt.uc);
682 
683 	ret = intel_engines_init_mmio(dev_priv);
684 	if (ret)
685 		goto err_uncore;
686 
687 	i915_gem_init_mmio(dev_priv);
688 
689 	return 0;
690 
691 err_uncore:
692 	intel_teardown_mchbar(dev_priv);
693 	intel_uncore_fini_mmio(&dev_priv->uncore);
694 err_bridge:
695 	pci_dev_put(dev_priv->bridge_dev);
696 
697 	return ret;
698 }
699 
700 /**
701  * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
702  * @dev_priv: device private
703  */
704 static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
705 {
706 	intel_engines_cleanup(dev_priv);
707 	intel_teardown_mchbar(dev_priv);
708 	intel_uncore_fini_mmio(&dev_priv->uncore);
709 	pci_dev_put(dev_priv->bridge_dev);
710 }
711 
712 static void intel_sanitize_options(struct drm_i915_private *dev_priv)
713 {
714 	intel_gvt_sanitize_options(dev_priv);
715 }
716 
717 #define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
718 
719 static const char *intel_dram_type_str(enum intel_dram_type type)
720 {
721 	static const char * const str[] = {
722 		DRAM_TYPE_STR(UNKNOWN),
723 		DRAM_TYPE_STR(DDR3),
724 		DRAM_TYPE_STR(DDR4),
725 		DRAM_TYPE_STR(LPDDR3),
726 		DRAM_TYPE_STR(LPDDR4),
727 	};
728 
729 	if (type >= ARRAY_SIZE(str))
730 		type = INTEL_DRAM_UNKNOWN;
731 
732 	return str[type];
733 }
734 
735 #undef DRAM_TYPE_STR
736 
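/*
 * Number of DRAM devices on the DIMM: each rank is 64 bits wide and is
 * built from devices that are dimm->width bits wide each.
 */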
737 static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
738 {
739 	return dimm->ranks * 64 / (dimm->width ?: 1);
740 }
741 
742 /* Returns total GB for the whole DIMM */
743 static int skl_get_dimm_size(u16 val)
744 {
745 	return val & SKL_DRAM_SIZE_MASK;
746 }
747 
748 static int skl_get_dimm_width(u16 val)
749 {
750 	if (skl_get_dimm_size(val) == 0)
751 		return 0;
752 
753 	switch (val & SKL_DRAM_WIDTH_MASK) {
754 	case SKL_DRAM_WIDTH_X8:
755 	case SKL_DRAM_WIDTH_X16:
756 	case SKL_DRAM_WIDTH_X32:
757 		val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
758 		return 8 << val;
759 	default:
760 		MISSING_CASE(val);
761 		return 0;
762 	}
763 }
764 
765 static int skl_get_dimm_ranks(u16 val)
766 {
767 	if (skl_get_dimm_size(val) == 0)
768 		return 0;
769 
770 	val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
771 
772 	return val + 1;
773 }
774 
775 /* Returns total GB for the whole DIMM */
776 static int cnl_get_dimm_size(u16 val)
777 {
778 	return (val & CNL_DRAM_SIZE_MASK) / 2;
779 }
780 
781 static int cnl_get_dimm_width(u16 val)
782 {
783 	if (cnl_get_dimm_size(val) == 0)
784 		return 0;
785 
786 	switch (val & CNL_DRAM_WIDTH_MASK) {
787 	case CNL_DRAM_WIDTH_X8:
788 	case CNL_DRAM_WIDTH_X16:
789 	case CNL_DRAM_WIDTH_X32:
790 		val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
791 		return 8 << val;
792 	default:
793 		MISSING_CASE(val);
794 		return 0;
795 	}
796 }
797 
798 static int cnl_get_dimm_ranks(u16 val)
799 {
800 	if (cnl_get_dimm_size(val) == 0)
801 		return 0;
802 
803 	val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
804 
805 	return val + 1;
806 }
807 
808 static bool
809 skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
810 {
811 	/* Convert total GB to Gb per DRAM device */
812 	return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
813 }
814 
815 static void
816 skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
817 		       struct dram_dimm_info *dimm,
818 		       int channel, char dimm_name, u16 val)
819 {
820 	if (INTEL_GEN(dev_priv) >= 10) {
821 		dimm->size = cnl_get_dimm_size(val);
822 		dimm->width = cnl_get_dimm_width(val);
823 		dimm->ranks = cnl_get_dimm_ranks(val);
824 	} else {
825 		dimm->size = skl_get_dimm_size(val);
826 		dimm->width = skl_get_dimm_width(val);
827 		dimm->ranks = skl_get_dimm_ranks(val);
828 	}
829 
830 	DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
831 		      channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
832 		      yesno(skl_is_16gb_dimm(dimm)));
833 }
834 
835 static int
836 skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
837 			  struct dram_channel_info *ch,
838 			  int channel, u32 val)
839 {
840 	skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
841 			       channel, 'L', val & 0xffff);
842 	skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
843 			       channel, 'S', val >> 16);
844 
845 	if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
846 		DRM_DEBUG_KMS("CH%u not populated\n", channel);
847 		return -EINVAL;
848 	}
849 
850 	if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
851 		ch->ranks = 2;
852 	else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
853 		ch->ranks = 2;
854 	else
855 		ch->ranks = 1;
856 
857 	ch->is_16gb_dimm =
858 		skl_is_16gb_dimm(&ch->dimm_l) ||
859 		skl_is_16gb_dimm(&ch->dimm_s);
860 
861 	DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
862 		      channel, ch->ranks, yesno(ch->is_16gb_dimm));
863 
864 	return 0;
865 }
866 
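/*
 * Channels are considered symmetric when both channels are identical and,
 * within a channel, either only the L slot is populated or the L and S
 * DIMMs match.
 */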
867 static bool
868 intel_is_dram_symmetric(const struct dram_channel_info *ch0,
869 			const struct dram_channel_info *ch1)
870 {
871 	return !memcmp(ch0, ch1, sizeof(*ch0)) &&
872 		(ch0->dimm_s.size == 0 ||
873 		 !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
874 }
875 
876 static int
877 skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
878 {
879 	struct dram_info *dram_info = &dev_priv->dram_info;
880 	struct dram_channel_info ch0 = {}, ch1 = {};
881 	u32 val;
882 	int ret;
883 
884 	val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
885 	ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
886 	if (ret == 0)
887 		dram_info->num_channels++;
888 
889 	val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
890 	ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
891 	if (ret == 0)
892 		dram_info->num_channels++;
893 
894 	if (dram_info->num_channels == 0) {
895 		DRM_INFO("Number of memory channels is zero\n");
896 		return -EINVAL;
897 	}
898 
899 	/*
900 	 * If any of the channels is a single-rank channel, the worst-case
901 	 * output will be the same as for single-rank memory, so treat it
902 	 * as single-rank memory.
903 	 */
904 	if (ch0.ranks == 1 || ch1.ranks == 1)
905 		dram_info->ranks = 1;
906 	else
907 		dram_info->ranks = max(ch0.ranks, ch1.ranks);
908 
909 	if (dram_info->ranks == 0) {
910 		DRM_INFO("couldn't get memory rank information\n");
911 		return -EINVAL;
912 	}
913 
914 	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
915 
916 	dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
917 
918 	DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
919 		      yesno(dram_info->symmetric_memory));
920 	return 0;
921 }
922 
923 static enum intel_dram_type
924 skl_get_dram_type(struct drm_i915_private *dev_priv)
925 {
926 	u32 val;
927 
928 	val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
929 
930 	switch (val & SKL_DRAM_DDR_TYPE_MASK) {
931 	case SKL_DRAM_DDR_TYPE_DDR3:
932 		return INTEL_DRAM_DDR3;
933 	case SKL_DRAM_DDR_TYPE_DDR4:
934 		return INTEL_DRAM_DDR4;
935 	case SKL_DRAM_DDR_TYPE_LPDDR3:
936 		return INTEL_DRAM_LPDDR3;
937 	case SKL_DRAM_DDR_TYPE_LPDDR4:
938 		return INTEL_DRAM_LPDDR4;
939 	default:
940 		MISSING_CASE(val);
941 		return INTEL_DRAM_UNKNOWN;
942 	}
943 }
944 
945 static int
946 skl_get_dram_info(struct drm_i915_private *dev_priv)
947 {
948 	struct dram_info *dram_info = &dev_priv->dram_info;
949 	u32 mem_freq_khz, val;
950 	int ret;
951 
952 	dram_info->type = skl_get_dram_type(dev_priv);
953 	DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));
954 
955 	ret = skl_dram_get_channels_info(dev_priv);
956 	if (ret)
957 		return ret;
958 
959 	val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
960 	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
961 				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
962 
963 	dram_info->bandwidth_kbps = dram_info->num_channels *
964 							mem_freq_khz * 8;
965 
966 	if (dram_info->bandwidth_kbps == 0) {
967 		DRM_INFO("Couldn't get system memory bandwidth\n");
968 		return -EINVAL;
969 	}
970 
971 	dram_info->valid = true;
972 	return 0;
973 }
974 
975 /* Returns Gb per DRAM device */
976 static int bxt_get_dimm_size(u32 val)
977 {
978 	switch (val & BXT_DRAM_SIZE_MASK) {
979 	case BXT_DRAM_SIZE_4GBIT:
980 		return 4;
981 	case BXT_DRAM_SIZE_6GBIT:
982 		return 6;
983 	case BXT_DRAM_SIZE_8GBIT:
984 		return 8;
985 	case BXT_DRAM_SIZE_12GBIT:
986 		return 12;
987 	case BXT_DRAM_SIZE_16GBIT:
988 		return 16;
989 	default:
990 		MISSING_CASE(val);
991 		return 0;
992 	}
993 }
994 
995 static int bxt_get_dimm_width(u32 val)
996 {
997 	if (!bxt_get_dimm_size(val))
998 		return 0;
999 
1000 	val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
1001 
1002 	return 8 << val;
1003 }
1004 
1005 static int bxt_get_dimm_ranks(u32 val)
1006 {
1007 	if (!bxt_get_dimm_size(val))
1008 		return 0;
1009 
1010 	switch (val & BXT_DRAM_RANK_MASK) {
1011 	case BXT_DRAM_RANK_SINGLE:
1012 		return 1;
1013 	case BXT_DRAM_RANK_DUAL:
1014 		return 2;
1015 	default:
1016 		MISSING_CASE(val);
1017 		return 0;
1018 	}
1019 }
1020 
1021 static enum intel_dram_type bxt_get_dimm_type(u32 val)
1022 {
1023 	if (!bxt_get_dimm_size(val))
1024 		return INTEL_DRAM_UNKNOWN;
1025 
1026 	switch (val & BXT_DRAM_TYPE_MASK) {
1027 	case BXT_DRAM_TYPE_DDR3:
1028 		return INTEL_DRAM_DDR3;
1029 	case BXT_DRAM_TYPE_LPDDR3:
1030 		return INTEL_DRAM_LPDDR3;
1031 	case BXT_DRAM_TYPE_DDR4:
1032 		return INTEL_DRAM_DDR4;
1033 	case BXT_DRAM_TYPE_LPDDR4:
1034 		return INTEL_DRAM_LPDDR4;
1035 	default:
1036 		MISSING_CASE(val);
1037 		return INTEL_DRAM_UNKNOWN;
1038 	}
1039 }
1040 
1041 static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
1042 			      u32 val)
1043 {
1044 	dimm->width = bxt_get_dimm_width(val);
1045 	dimm->ranks = bxt_get_dimm_ranks(val);
1046 
1047 	/*
1048 	 * Size in register is Gb per DRAM device. Convert to total
1049 	 * GB to match the way we report this for non-LP platforms.
1050 	 */
1051 	dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
1052 }
1053 
1054 static int
1055 bxt_get_dram_info(struct drm_i915_private *dev_priv)
1056 {
1057 	struct dram_info *dram_info = &dev_priv->dram_info;
1058 	u32 dram_channels;
1059 	u32 mem_freq_khz, val;
1060 	u8 num_active_channels;
1061 	int i;
1062 
1063 	val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
1064 	mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
1065 				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
1066 
1067 	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
1068 	num_active_channels = hweight32(dram_channels);
1069 
1070 	/* Each active bit represents a 4-byte-wide channel */
1071 	dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
1072 
1073 	if (dram_info->bandwidth_kbps == 0) {
1074 		DRM_INFO("Couldn't get system memory bandwidth\n");
1075 		return -EINVAL;
1076 	}
1077 
1078 	/*
1079 	 * Now read each of DUNIT8/9/10/11 to check the rank of each DIMM.
1080 	 */
1081 	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
1082 		struct dram_dimm_info dimm;
1083 		enum intel_dram_type type;
1084 
1085 		val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
1086 		if (val == 0xFFFFFFFF)
1087 			continue;
1088 
1089 		dram_info->num_channels++;
1090 
1091 		bxt_get_dimm_info(&dimm, val);
1092 		type = bxt_get_dimm_type(val);
1093 
1094 		WARN_ON(type != INTEL_DRAM_UNKNOWN &&
1095 			dram_info->type != INTEL_DRAM_UNKNOWN &&
1096 			dram_info->type != type);
1097 
1098 		DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
1099 			      i - BXT_D_CR_DRP0_DUNIT_START,
1100 			      dimm.size, dimm.width, dimm.ranks,
1101 			      intel_dram_type_str(type));
1102 
1103 		/*
1104 		 * If any of the channels is a single-rank channel, the
1105 		 * worst-case output will be the same as for single-rank
1106 		 * memory, so treat it as single-rank memory.
1107 		 */
1108 		if (dram_info->ranks == 0)
1109 			dram_info->ranks = dimm.ranks;
1110 		else if (dimm.ranks == 1)
1111 			dram_info->ranks = 1;
1112 
1113 		if (type != INTEL_DRAM_UNKNOWN)
1114 			dram_info->type = type;
1115 	}
1116 
1117 	if (dram_info->type == INTEL_DRAM_UNKNOWN ||
1118 	    dram_info->ranks == 0) {
1119 		DRM_INFO("couldn't get memory information\n");
1120 		return -EINVAL;
1121 	}
1122 
1123 	dram_info->valid = true;
1124 	return 0;
1125 }
1126 
1127 static void
1128 intel_get_dram_info(struct drm_i915_private *dev_priv)
1129 {
1130 	struct dram_info *dram_info = &dev_priv->dram_info;
1131 	int ret;
1132 
1133 	/*
1134 	 * Assume 16Gb DIMMs are present until proven otherwise.
1135 	 * This is only used for the level 0 watermark latency
1136 	 * w/a which does not apply to bxt/glk.
1137 	 */
1138 	dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
1139 
1140 	if (INTEL_GEN(dev_priv) < 9)
1141 		return;
1142 
1143 	if (IS_GEN9_LP(dev_priv))
1144 		ret = bxt_get_dram_info(dev_priv);
1145 	else
1146 		ret = skl_get_dram_info(dev_priv);
1147 	if (ret)
1148 		return;
1149 
1150 	DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
1151 		      dram_info->bandwidth_kbps,
1152 		      dram_info->num_channels);
1153 
1154 	DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
1155 		      dram_info->ranks, yesno(dram_info->is_16gb_dimm));
1156 }
1157 
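/* Decode the HSW_EDRAM_CAP value into the eDRAM size in MB (banks * ways * sets). */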
1158 static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
1159 {
1160 	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
1161 	const unsigned int sets[4] = { 1, 1, 2, 2 };
1162 
1163 	return EDRAM_NUM_BANKS(cap) *
1164 		ways[EDRAM_WAYS_IDX(cap)] *
1165 		sets[EDRAM_SETS_IDX(cap)];
1166 }
1167 
1168 static void edram_detect(struct drm_i915_private *dev_priv)
1169 {
1170 	u32 edram_cap = 0;
1171 
1172 	if (!(IS_HASWELL(dev_priv) ||
1173 	      IS_BROADWELL(dev_priv) ||
1174 	      INTEL_GEN(dev_priv) >= 9))
1175 		return;
1176 
1177 	edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);
1178 
1179 	/* NB: We can't write IDICR yet because we don't have gt funcs set up */
1180 
1181 	if (!(edram_cap & EDRAM_ENABLED))
1182 		return;
1183 
1184 	/*
1185 	 * The capability bits needed for the size calculation are not there on
1186 	 * pre-gen9 hardware, so report 128MB there.
1187 	 */
1188 	if (INTEL_GEN(dev_priv) < 9)
1189 		dev_priv->edram_size_mb = 128;
1190 	else
1191 		dev_priv->edram_size_mb =
1192 			gen9_edram_size_mb(dev_priv, edram_cap);
1193 
1194 	dev_info(dev_priv->drm.dev,
1195 		 "Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
1196 }
1197 
1198 /**
1199  * i915_driver_hw_probe - setup state requiring device access
1200  * @dev_priv: device private
1201  *
1202  * Setup state that requires accessing the device, but doesn't require
1203  * exposing the driver via kernel internal or userspace interfaces.
1204  */
1205 static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
1206 {
1207 	struct pci_dev *pdev = dev_priv->drm.pdev;
1208 	int ret;
1209 
1210 	if (i915_inject_probe_failure(dev_priv))
1211 		return -ENODEV;
1212 
1213 	intel_device_info_runtime_init(dev_priv);
1214 
1215 	if (HAS_PPGTT(dev_priv)) {
1216 		if (intel_vgpu_active(dev_priv) &&
1217 		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
1218 			i915_report_error(dev_priv,
1219 					  "incompatible vGPU found, support for isolated ppGTT required\n");
1220 			return -ENXIO;
1221 		}
1222 	}
1223 
1224 	if (HAS_EXECLISTS(dev_priv)) {
1225 		/*
1226 		 * Older GVT emulation depends upon intercepting CSB mmio,
1227 		 * which we no longer use, preferring to use the HWSP cache
1228 		 * instead.
1229 		 */
1230 		if (intel_vgpu_active(dev_priv) &&
1231 		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
1232 			i915_report_error(dev_priv,
1233 					  "old vGPU host found, support for HWSP emulation required\n");
1234 			return -ENXIO;
1235 		}
1236 	}
1237 
1238 	intel_sanitize_options(dev_priv);
1239 
1240 	/* needs to be done before ggtt probe */
1241 	edram_detect(dev_priv);
1242 
1243 	i915_perf_init(dev_priv);
1244 
1245 	ret = i915_ggtt_probe_hw(dev_priv);
1246 	if (ret)
1247 		goto err_perf;
1248 
1249 	/*
1250 	 * WARNING: Apparently we must kick fbdev drivers before vgacon,
1251 	 * otherwise the vga fbdev driver falls over.
1252 	 */
1253 	ret = i915_kick_out_firmware_fb(dev_priv);
1254 	if (ret) {
1255 		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
1256 		goto err_ggtt;
1257 	}
1258 
1259 	ret = vga_remove_vgacon(pdev);
1260 	if (ret) {
1261 		DRM_ERROR("failed to remove conflicting VGA console\n");
1262 		goto err_ggtt;
1263 	}
1264 
1265 	ret = i915_ggtt_init_hw(dev_priv);
1266 	if (ret)
1267 		goto err_ggtt;
1268 
1269 	intel_gt_init_hw(dev_priv);
1270 
1271 	ret = i915_ggtt_enable_hw(dev_priv);
1272 	if (ret) {
1273 		DRM_ERROR("failed to enable GGTT\n");
1274 		goto err_ggtt;
1275 	}
1276 
1277 	pci_set_master(pdev);
1278 
1279 	/*
1280 	 * We don't have a max segment size, so set it to the max so sg's
1281 	 * debugging layer doesn't complain
1282 	 */
1283 	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
1284 
1285 	/* overlay on gen2 is broken and can't address above 1G */
1286 	if (IS_GEN(dev_priv, 2)) {
1287 		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
1288 		if (ret) {
1289 			DRM_ERROR("failed to set DMA mask\n");
1290 
1291 			goto err_ggtt;
1292 		}
1293 	}
1294 
1295 	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
1296 	 * using 32bit addressing, overwriting memory if HWS is located
1297 	 * above 4GB.
1298 	 *
1299 	 * The documentation also mentions an issue with undefined
1300 	 * behaviour if any general state is accessed within a page above 4GB,
1301 	 * which also needs to be handled carefully.
1302 	 */
1303 	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
1304 		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1305 
1306 		if (ret) {
1307 			DRM_ERROR("failed to set DMA mask\n");
1308 
1309 			goto err_ggtt;
1310 		}
1311 	}
1312 
1313 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
1314 			   PM_QOS_DEFAULT_VALUE);
1315 
1316 	/* BIOS often leaves RC6 enabled, but disable it for hw init */
1317 	intel_sanitize_gt_powersave(dev_priv);
1318 
1319 	intel_gt_init_workarounds(dev_priv);
1320 
1321 	/* On the 945G/GM, the chipset reports the MSI capability on the
1322 	 * integrated graphics even though the support isn't actually there
1323 	 * according to the published specs.  It doesn't appear to function
1324 	 * correctly in testing on 945G.
1325 	 * This may be a side effect of MSI having been made available for PEG
1326 	 * and the registers being closely associated.
1327 	 *
1328 	 * According to chipset errata, on the 965GM, MSI interrupts may
1329 	 * be lost or delayed, and was defeatured. MSI interrupts seem to
1330 	 * get lost on g4x as well, and interrupt delivery seems to stay
1331 	 * properly dead afterwards. So we'll just disable them for all
1332 	 * pre-gen5 chipsets.
1333 	 *
1334 	 * dp aux and gmbus irq on gen4 seems to be able to generate legacy
1335 	 * interrupts even when in MSI mode. This results in spurious
1336 	 * interrupt warnings if the legacy irq no. is shared with another
1337 	 * device. The kernel then disables that interrupt source and so
1338 	 * prevents the other device from working properly.
1339 	 */
1340 	if (INTEL_GEN(dev_priv) >= 5) {
1341 		if (pci_enable_msi(pdev) < 0)
1342 			DRM_DEBUG_DRIVER("can't enable MSI");
1343 	}
1344 
1345 	ret = intel_gvt_init(dev_priv);
1346 	if (ret)
1347 		goto err_msi;
1348 
1349 	intel_opregion_setup(dev_priv);
1350 	/*
1351 	 * Fill the dram structure to get the system raw bandwidth and
1352 	 * dram info. This will be used for memory latency calculation.
1353 	 */
1354 	intel_get_dram_info(dev_priv);
1355 
1356 	intel_bw_init_hw(dev_priv);
1357 
1358 	return 0;
1359 
1360 err_msi:
1361 	if (pdev->msi_enabled)
1362 		pci_disable_msi(pdev);
1363 	pm_qos_remove_request(&dev_priv->pm_qos);
1364 err_ggtt:
1365 	i915_ggtt_driver_release(dev_priv);
1366 err_perf:
1367 	i915_perf_fini(dev_priv);
1368 	return ret;
1369 }
1370 
1371 /**
1372  * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
1373  * @dev_priv: device private
1374  */
1375 static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
1376 {
1377 	struct pci_dev *pdev = dev_priv->drm.pdev;
1378 
1379 	i915_perf_fini(dev_priv);
1380 
1381 	if (pdev->msi_enabled)
1382 		pci_disable_msi(pdev);
1383 
1384 	pm_qos_remove_request(&dev_priv->pm_qos);
1385 }
1386 
1387 /**
1388  * i915_driver_register - register the driver with the rest of the system
1389  * @dev_priv: device private
1390  *
1391  * Perform any steps necessary to make the driver available via kernel
1392  * internal or userspace interfaces.
1393  */
1394 static void i915_driver_register(struct drm_i915_private *dev_priv)
1395 {
1396 	struct drm_device *dev = &dev_priv->drm;
1397 
1398 	i915_gem_driver_register(dev_priv);
1399 	i915_pmu_register(dev_priv);
1400 
1401 	/*
1402 	 * Notify a valid surface after modesetting,
1403 	 * when running inside a VM.
1404 	 */
1405 	if (intel_vgpu_active(dev_priv))
1406 		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1407 
1408 	/* Reveal our presence to userspace */
1409 	if (drm_dev_register(dev, 0) == 0) {
1410 		i915_debugfs_register(dev_priv);
1411 		i915_setup_sysfs(dev_priv);
1412 
1413 		/* Depends on sysfs having been initialized */
1414 		i915_perf_register(dev_priv);
1415 	} else
1416 		DRM_ERROR("Failed to register driver for userspace access!\n");
1417 
1418 	if (HAS_DISPLAY(dev_priv)) {
1419 		/* Must be done after probing outputs */
1420 		intel_opregion_register(dev_priv);
1421 		acpi_video_register();
1422 	}
1423 
1424 	if (IS_GEN(dev_priv, 5))
1425 		intel_gpu_ips_init(dev_priv);
1426 
1427 	intel_audio_init(dev_priv);
1428 
1429 	/*
1430 	 * Some ports require correctly set-up hpd registers for detection to
1431 	 * work properly (leading to ghost connected connector status), e.g. VGA
1432 	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
1433 	 * irqs are fully enabled. We do it last so that the async config
1434 	 * cannot run before the connectors are registered.
1435 	 */
1436 	intel_fbdev_initial_config_async(dev);
1437 
1438 	/*
1439 	 * We need to coordinate the hotplugs with the asynchronous fbdev
1440 	 * configuration, for which we use the fbdev->async_cookie.
1441 	 */
1442 	if (HAS_DISPLAY(dev_priv))
1443 		drm_kms_helper_poll_init(dev);
1444 
1445 	intel_power_domains_enable(dev_priv);
1446 	intel_runtime_pm_enable(&dev_priv->runtime_pm);
1447 }
1448 
1449 /**
1450  * i915_driver_unregister - cleanup the registration done in i915_driver_register()
1451  * @dev_priv: device private
1452  */
1453 static void i915_driver_unregister(struct drm_i915_private *dev_priv)
1454 {
1455 	intel_runtime_pm_disable(&dev_priv->runtime_pm);
1456 	intel_power_domains_disable(dev_priv);
1457 
1458 	intel_fbdev_unregister(dev_priv);
1459 	intel_audio_deinit(dev_priv);
1460 
1461 	/*
1462 	 * After flushing the fbdev (incl. a late async config which will
1463 	 * have delayed queuing of a hotplug event), then flush the hotplug
1464 	 * events.
1465 	 */
1466 	drm_kms_helper_poll_fini(&dev_priv->drm);
1467 
1468 	intel_gpu_ips_teardown();
1469 	acpi_video_unregister();
1470 	intel_opregion_unregister(dev_priv);
1471 
1472 	i915_perf_unregister(dev_priv);
1473 	i915_pmu_unregister(dev_priv);
1474 
1475 	i915_teardown_sysfs(dev_priv);
1476 	drm_dev_unplug(&dev_priv->drm);
1477 
1478 	i915_gem_driver_unregister(dev_priv);
1479 }
1480 
1481 static void i915_welcome_messages(struct drm_i915_private *dev_priv)
1482 {
1483 	if (drm_debug & DRM_UT_DRIVER) {
1484 		struct drm_printer p = drm_debug_printer("i915 device info:");
1485 
1486 		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
1487 			   INTEL_DEVID(dev_priv),
1488 			   INTEL_REVID(dev_priv),
1489 			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
1490 			   intel_subplatform(RUNTIME_INFO(dev_priv),
1491 					     INTEL_INFO(dev_priv)->platform),
1492 			   INTEL_GEN(dev_priv));
1493 
1494 		intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
1495 		intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
1496 	}
1497 
1498 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
1499 		DRM_INFO("DRM_I915_DEBUG enabled\n");
1500 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
1501 		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
1502 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
1503 		DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
1504 }
1505 
1506 static struct drm_i915_private *
1507 i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
1508 {
1509 	const struct intel_device_info *match_info =
1510 		(struct intel_device_info *)ent->driver_data;
1511 	struct intel_device_info *device_info;
1512 	struct drm_i915_private *i915;
1513 	int err;
1514 
1515 	i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
1516 	if (!i915)
1517 		return ERR_PTR(-ENOMEM);
1518 
1519 	err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
1520 	if (err) {
1521 		kfree(i915);
1522 		return ERR_PTR(err);
1523 	}
1524 
1525 	i915->drm.dev_private = i915;
1526 
1527 	i915->drm.pdev = pdev;
1528 	pci_set_drvdata(pdev, i915);
1529 
1530 	/* Setup the write-once "constant" device info */
1531 	device_info = mkwrite_device_info(i915);
1532 	memcpy(device_info, match_info, sizeof(*device_info));
1533 	RUNTIME_INFO(i915)->device_id = pdev->device;
1534 
1535 	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
1536 
1537 	return i915;
1538 }
1539 
1540 static void i915_driver_destroy(struct drm_i915_private *i915)
1541 {
1542 	struct pci_dev *pdev = i915->drm.pdev;
1543 
1544 	drm_dev_fini(&i915->drm);
1545 	kfree(i915);
1546 
1547 	/* And make sure we never chase our dangling pointer from pci_dev */
1548 	pci_set_drvdata(pdev, NULL);
1549 }
1550 
1551 /**
1552  * i915_driver_probe - setup chip and create an initial config
1553  * @pdev: PCI device
1554  * @ent: matching PCI ID entry
1555  *
1556  * The driver probe routine has to do several things:
1557  *   - drive output discovery via intel_modeset_init()
1558  *   - initialize the memory manager
1559  *   - allocate initial config memory
1560  *   - setup the DRM framebuffer with the allocated memory
1561  */
1562 int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1563 {
1564 	const struct intel_device_info *match_info =
1565 		(struct intel_device_info *)ent->driver_data;
1566 	struct drm_i915_private *dev_priv;
1567 	int ret;
1568 
1569 	dev_priv = i915_driver_create(pdev, ent);
1570 	if (IS_ERR(dev_priv))
1571 		return PTR_ERR(dev_priv);
1572 
1573 	/* Disable nuclear pageflip by default on pre-ILK */
1574 	if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
1575 		dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
1576 
1577 	ret = pci_enable_device(pdev);
1578 	if (ret)
1579 		goto out_fini;
1580 
1581 	ret = i915_driver_early_probe(dev_priv);
1582 	if (ret < 0)
1583 		goto out_pci_disable;
1584 
1585 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1586 
1587 	i915_detect_vgpu(dev_priv);
1588 
1589 	ret = i915_driver_mmio_probe(dev_priv);
1590 	if (ret < 0)
1591 		goto out_runtime_pm_put;
1592 
1593 	ret = i915_driver_hw_probe(dev_priv);
1594 	if (ret < 0)
1595 		goto out_cleanup_mmio;
1596 
1597 	ret = i915_driver_modeset_probe(&dev_priv->drm);
1598 	if (ret < 0)
1599 		goto out_cleanup_hw;
1600 
1601 	i915_driver_register(dev_priv);
1602 
1603 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1604 
1605 	i915_welcome_messages(dev_priv);
1606 
1607 	return 0;
1608 
1609 out_cleanup_hw:
1610 	i915_driver_hw_remove(dev_priv);
1611 	i915_ggtt_driver_release(dev_priv);
1612 
1613 	/* Paranoia: make sure we have disabled everything before we exit. */
1614 	intel_sanitize_gt_powersave(dev_priv);
1615 out_cleanup_mmio:
1616 	i915_driver_mmio_release(dev_priv);
1617 out_runtime_pm_put:
1618 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1619 	i915_driver_late_release(dev_priv);
1620 out_pci_disable:
1621 	pci_disable_device(pdev);
1622 out_fini:
1623 	i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret);
1624 	i915_driver_destroy(dev_priv);
1625 	return ret;
1626 }
1627 
1628 void i915_driver_remove(struct drm_i915_private *i915)
1629 {
1630 	struct pci_dev *pdev = i915->drm.pdev;
1631 
1632 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
1633 
1634 	i915_driver_unregister(i915);
1635 
1636 	/*
1637 	 * After unregistering the device to prevent any new users, cancel
1638 	 * all in-flight requests so that we can quickly unbind the active
1639 	 * resources.
1640 	 */
1641 	intel_gt_set_wedged(&i915->gt);
1642 
1643 	/* Flush any external code that still may be under the RCU lock */
1644 	synchronize_rcu();
1645 
1646 	i915_gem_suspend(i915);
1647 
1648 	drm_atomic_helper_shutdown(&i915->drm);
1649 
1650 	intel_gvt_driver_remove(i915);
1651 
1652 	intel_modeset_driver_remove(&i915->drm);
1653 
1654 	intel_bios_driver_remove(i915);
1655 
1656 	vga_switcheroo_unregister_client(pdev);
1657 	vga_client_register(pdev, NULL, NULL, NULL);
1658 
1659 	intel_csr_ucode_fini(i915);
1660 
1661 	/* Free error state after interrupts are fully disabled. */
1662 	cancel_delayed_work_sync(&i915->gt.hangcheck.work);
1663 	i915_reset_error_state(i915);
1664 
1665 	i915_gem_driver_remove(i915);
1666 
1667 	intel_power_domains_driver_remove(i915);
1668 
1669 	i915_driver_hw_remove(i915);
1670 
1671 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
1672 }
1673 
1674 static void i915_driver_release(struct drm_device *dev)
1675 {
1676 	struct drm_i915_private *dev_priv = to_i915(dev);
1677 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1678 
1679 	disable_rpm_wakeref_asserts(rpm);
1680 
1681 	i915_gem_driver_release(dev_priv);
1682 
1683 	i915_ggtt_driver_release(dev_priv);
1684 
1685 	/* Paranoia: make sure we have disabled everything before we exit. */
1686 	intel_sanitize_gt_powersave(dev_priv);
1687 
1688 	i915_driver_mmio_release(dev_priv);
1689 
1690 	enable_rpm_wakeref_asserts(rpm);
1691 	intel_runtime_pm_driver_release(rpm);
1692 
1693 	i915_driver_late_release(dev_priv);
1694 	i915_driver_destroy(dev_priv);
1695 }
1696 
1697 static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1698 {
1699 	struct drm_i915_private *i915 = to_i915(dev);
1700 	int ret;
1701 
1702 	ret = i915_gem_open(i915, file);
1703 	if (ret)
1704 		return ret;
1705 
1706 	return 0;
1707 }
1708 
1709 /**
1710  * i915_driver_lastclose - clean up after all DRM clients have exited
1711  * @dev: DRM device
1712  *
1713  * Take care of cleaning up after all DRM clients have exited.  In the
1714  * mode setting case, we want to restore the kernel's initial mode (just
1715  * in case the last client left us in a bad state).
1716  *
1717  * Additionally, in the non-mode setting case, we'll tear down the GTT
1718  * and DMA structures, since the kernel won't be using them, and clean
1719  * up any GEM state.
1720  */
1721 static void i915_driver_lastclose(struct drm_device *dev)
1722 {
1723 	intel_fbdev_restore_mode(dev);
1724 	vga_switcheroo_process_delayed_switch();
1725 }
1726 
1727 static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1728 {
1729 	struct drm_i915_file_private *file_priv = file->driver_priv;
1730 
1731 	mutex_lock(&dev->struct_mutex);
1732 	i915_gem_context_close(file);
1733 	i915_gem_release(dev, file);
1734 	mutex_unlock(&dev->struct_mutex);
1735 
1736 	kfree(file_priv);
1737 
1738 	/* Catch up with all the deferred frees from "this" client */
1739 	i915_gem_flush_free_objects(to_i915(dev));
1740 }
1741 
1742 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
1743 {
1744 	struct drm_device *dev = &dev_priv->drm;
1745 	struct intel_encoder *encoder;
1746 
1747 	drm_modeset_lock_all(dev);
1748 	for_each_intel_encoder(dev, encoder)
1749 		if (encoder->suspend)
1750 			encoder->suspend(encoder);
1751 	drm_modeset_unlock_all(dev);
1752 }
1753 
1754 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1755 			      bool rpm_resume);
1756 static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
1757 
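/* True when the target system sleep state is shallower than S3, i.e. suspend-to-idle. */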
1758 static bool suspend_to_idle(struct drm_i915_private *dev_priv)
1759 {
1760 #if IS_ENABLED(CONFIG_ACPI_SLEEP)
1761 	if (acpi_target_system_state() < ACPI_STATE_S3)
1762 		return true;
1763 #endif
1764 	return false;
1765 }
1766 
1767 static int i915_drm_prepare(struct drm_device *dev)
1768 {
1769 	struct drm_i915_private *i915 = to_i915(dev);
1770 
1771 	/*
1772 	 * NB intel_display_suspend() may issue new requests after we've
1773 	 * ostensibly marked the GPU as ready-to-sleep here. We need to
1774 	 * split out that work and pull it forward so that after point,
1775 	 * the GPU is not woken again.
1776 	 */
1777 	i915_gem_suspend(i915);
1778 
1779 	return 0;
1780 }
1781 
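/*
 * First phase of system suspend: with wakeref asserts disabled, power
 * domains and hotplug polling are shut off, the display and MST topology
 * are suspended, interrupts are torn down, register state is saved and
 * the opregion is notified of the target PCI power state.
 */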
1782 static int i915_drm_suspend(struct drm_device *dev)
1783 {
1784 	struct drm_i915_private *dev_priv = to_i915(dev);
1785 	struct pci_dev *pdev = dev_priv->drm.pdev;
1786 	pci_power_t opregion_target_state;
1787 
1788 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1789 
1790 	/* We do a lot of poking in a lot of registers; make sure they work
1791 	 * properly. */
1792 	intel_power_domains_disable(dev_priv);
1793 
1794 	drm_kms_helper_poll_disable(dev);
1795 
1796 	pci_save_state(pdev);
1797 
1798 	intel_display_suspend(dev);
1799 
1800 	intel_dp_mst_suspend(dev_priv);
1801 
1802 	intel_runtime_pm_disable_interrupts(dev_priv);
1803 	intel_hpd_cancel_work(dev_priv);
1804 
1805 	intel_suspend_encoders(dev_priv);
1806 
1807 	intel_suspend_hw(dev_priv);
1808 
1809 	i915_gem_suspend_gtt_mappings(dev_priv);
1810 
1811 	i915_save_state(dev_priv);
1812 
1813 	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
1814 	intel_opregion_suspend(dev_priv, opregion_target_state);
1815 
1816 	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
1817 
1818 	dev_priv->suspend_count++;
1819 
1820 	intel_csr_ucode_suspend(dev_priv);
1821 
1822 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1823 
1824 	return 0;
1825 }
1826 
1827 static enum i915_drm_suspend_mode
1828 get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
1829 {
1830 	if (hibernate)
1831 		return I915_DRM_SUSPEND_HIBERNATE;
1832 
1833 	if (suspend_to_idle(dev_priv))
1834 		return I915_DRM_SUSPEND_IDLE;
1835 
1836 	return I915_DRM_SUSPEND_MEM;
1837 }
1838 
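/*
 * Late phase of system suspend: quiesce GEM, apply the RC6 context
 * workaround, suspend the uncore and power domains, run the VLV/CHV S0ix
 * save sequence where applicable, and finally put the PCI device into
 * D3hot (unless we are hibernating on a pre-GEN6 platform, see the BIOS
 * workaround below).
 */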
1839 static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
1840 {
1841 	struct drm_i915_private *dev_priv = to_i915(dev);
1842 	struct pci_dev *pdev = dev_priv->drm.pdev;
1843 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1844 	int ret = 0;
1845 
1846 	disable_rpm_wakeref_asserts(rpm);
1847 
1848 	i915_gem_suspend_late(dev_priv);
1849 
1850 	i915_rc6_ctx_wa_suspend(dev_priv);
1851 
1852 	intel_uncore_suspend(&dev_priv->uncore);
1853 
1854 	intel_power_domains_suspend(dev_priv,
1855 				    get_suspend_mode(dev_priv, hibernation));
1856 
1857 	intel_display_power_suspend_late(dev_priv);
1858 
1859 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1860 		ret = vlv_suspend_complete(dev_priv);
1861 
1862 	if (ret) {
1863 		DRM_ERROR("Suspend complete failed: %d\n", ret);
1864 		intel_power_domains_resume(dev_priv);
1865 
1866 		goto out;
1867 	}
1868 
1869 	pci_disable_device(pdev);
1870 	/*
1871 	 * During hibernation on some platforms the BIOS may try to access
1872 	 * the device even though it's already in D3 and hang the machine. So
1873 	 * leave the device in D0 on those platforms and hope the BIOS will
1874 	 * power down the device properly. The issue was seen on multiple old
1875 	 * GENs with different BIOS vendors, so having an explicit blacklist
1876 	 * is impractical; apply the workaround on everything pre-GEN6. The
1877 	 * platforms where the issue was seen:
1878 	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
1879 	 * Fujitsu FSC S7110
1880 	 * Acer Aspire 1830T
1881 	 */
1882 	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
1883 		pci_set_power_state(pdev, PCI_D3hot);
1884 
1885 out:
1886 	enable_rpm_wakeref_asserts(rpm);
1887 	if (!dev_priv->uncore.user_forcewake_count)
1888 		intel_runtime_pm_driver_release(rpm);
1889 
1890 	return ret;
1891 }
1892 
1893 static int
1894 i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
1895 {
1896 	int error;
1897 
1898 	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
1899 			 state.event != PM_EVENT_FREEZE))
1900 		return -EINVAL;
1901 
1902 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1903 		return 0;
1904 
1905 	error = i915_drm_suspend(&i915->drm);
1906 	if (error)
1907 		return error;
1908 
1909 	return i915_drm_suspend_late(&i915->drm, false);
1910 }
1911 
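/*
 * Second phase of system resume, running after i915_drm_resume_early():
 * re-enable the GGTT, restore GTT mappings, fences and saved register
 * state, bring interrupts back up and then restore the modeset and fbdev
 * state.
 */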
1912 static int i915_drm_resume(struct drm_device *dev)
1913 {
1914 	struct drm_i915_private *dev_priv = to_i915(dev);
1915 	int ret;
1916 
1917 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1918 	intel_sanitize_gt_powersave(dev_priv);
1919 
1920 	i915_gem_sanitize(dev_priv);
1921 
1922 	ret = i915_ggtt_enable_hw(dev_priv);
1923 	if (ret)
1924 		DRM_ERROR("failed to re-enable GGTT\n");
1925 
1926 	mutex_lock(&dev_priv->drm.struct_mutex);
1927 	i915_gem_restore_gtt_mappings(dev_priv);
1928 	i915_gem_restore_fences(dev_priv);
1929 	mutex_unlock(&dev_priv->drm.struct_mutex);
1930 
1931 	intel_csr_ucode_resume(dev_priv);
1932 
1933 	i915_restore_state(dev_priv);
1934 	intel_pps_unlock_regs_wa(dev_priv);
1935 
1936 	intel_init_pch_refclk(dev_priv);
1937 
1938 	/*
1939 	 * Interrupts have to be enabled before any batches are run. If not the
1940 	 * GPU will hang. i915_gem_init_hw() will initiate batches to
1941 	 * update/restore the context.
1942 	 *
1943 	 * drm_mode_config_reset() needs AUX interrupts.
1944 	 *
1945 	 * Modeset enabling in intel_modeset_init_hw() also needs working
1946 	 * interrupts.
1947 	 */
1948 	intel_runtime_pm_enable_interrupts(dev_priv);
1949 
1950 	drm_mode_config_reset(dev);
1951 
1952 	i915_gem_resume(dev_priv);
1953 
1954 	intel_modeset_init_hw(dev);
1955 	intel_init_clock_gating(dev_priv);
1956 
1957 	spin_lock_irq(&dev_priv->irq_lock);
1958 	if (dev_priv->display.hpd_irq_setup)
1959 		dev_priv->display.hpd_irq_setup(dev_priv);
1960 	spin_unlock_irq(&dev_priv->irq_lock);
1961 
1962 	intel_dp_mst_resume(dev_priv);
1963 
1964 	intel_display_resume(dev);
1965 
1966 	drm_kms_helper_poll_enable(dev);
1967 
1968 	/*
1969 	 * ... but also need to make sure that hotplug processing
1970 	 * doesn't cause havoc. Like in the driver load code we don't
1971 	 * bother with the tiny race here where we might lose hotplug
1972 	 * notifications.
1973 	 */
1974 	intel_hpd_init(dev_priv);
1975 
1976 	intel_opregion_resume(dev_priv);
1977 
1978 	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
1979 
1980 	intel_power_domains_enable(dev_priv);
1981 
1982 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1983 
1984 	return 0;
1985 }
1986 
1987 static int i915_drm_resume_early(struct drm_device *dev)
1988 {
1989 	struct drm_i915_private *dev_priv = to_i915(dev);
1990 	struct pci_dev *pdev = dev_priv->drm.pdev;
1991 	int ret;
1992 
1993 	/*
1994 	 * We have a resume ordering issue with the snd-hda driver also
1995 	 * requiring our device to be powered up. Due to the lack of a
1996 	 * parent/child relationship we currently solve this with an early
1997 	 * resume hook.
1998 	 *
1999 	 * FIXME: This should be solved with a special hdmi sink device or
2000 	 * similar so that power domains can be employed.
2001 	 */
2002 
2003 	/*
2004 	 * Note that we need to set the power state explicitly, since we
2005 	 * powered off the device during freeze and the PCI core won't power
2006 	 * it back up for us during thaw. Powering off the device during
2007 	 * freeze is not a hard requirement though, and during the
2008 	 * suspend/resume phases the PCI core makes sure we get here with the
2009 	 * device powered on. So in case we change our freeze logic and keep
2010 	 * the device powered we can also remove the following set power state
2011 	 * call.
2012 	 */
2013 	ret = pci_set_power_state(pdev, PCI_D0);
2014 	if (ret) {
2015 		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
2016 		return ret;
2017 	}
2018 
2019 	/*
2020 	 * Note that pci_enable_device() first enables any parent bridge
2021 	 * device and only then sets the power state for this device. The
2022 	 * bridge enabling is a nop though, since bridge devices are resumed
2023 	 * first. The order of enabling power and enabling the device is
2024 	 * imposed by the PCI core as described above, so here we preserve the
2025 	 * same order for the freeze/thaw phases.
2026 	 *
2027 	 * TODO: eventually we should remove pci_disable_device() /
2028 	 * pci_enable_device() from suspend/resume. Due to how they
2029 	 * depend on the device enable refcount we can't anyway depend on them
2030 	 * disabling/enabling the device.
2031 	 */
2032 	if (pci_enable_device(pdev))
2033 		return -EIO;
2034 
2035 	pci_set_master(pdev);
2036 
2037 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2038 
2039 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2040 		ret = vlv_resume_prepare(dev_priv, false);
2041 	if (ret)
2042 		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
2043 			  ret);
2044 
2045 	intel_uncore_resume_early(&dev_priv->uncore);
2046 
2047 	intel_gt_check_and_clear_faults(&dev_priv->gt);
2048 
2049 	intel_display_power_resume_early(dev_priv);
2050 
2051 	intel_sanitize_gt_powersave(dev_priv);
2052 
2053 	intel_power_domains_resume(dev_priv);
2054 
2055 	i915_rc6_ctx_wa_resume(dev_priv);
2056 
2057 	intel_gt_sanitize(&dev_priv->gt, true);
2058 
2059 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2060 
2061 	return ret;
2062 }
2063 
2064 static int i915_resume_switcheroo(struct drm_i915_private *i915)
2065 {
2066 	int ret;
2067 
2068 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
2069 		return 0;
2070 
2071 	ret = i915_drm_resume_early(&i915->drm);
2072 	if (ret)
2073 		return ret;
2074 
2075 	return i915_drm_resume(&i915->drm);
2076 }
2077 
2078 static int i915_pm_prepare(struct device *kdev)
2079 {
2080 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
2081 
2082 	if (!i915) {
2083 		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
2084 		return -ENODEV;
2085 	}
2086 
2087 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
2088 		return 0;
2089 
2090 	return i915_drm_prepare(&i915->drm);
2091 }
2092 
2093 static int i915_pm_suspend(struct device *kdev)
2094 {
2095 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
2096 
2097 	if (!i915) {
2098 		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
2099 		return -ENODEV;
2100 	}
2101 
2102 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
2103 		return 0;
2104 
2105 	return i915_drm_suspend(&i915->drm);
2106 }
2107 
2108 static int i915_pm_suspend_late(struct device *kdev)
2109 {
2110 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
2111 
2112 	/*
2113 	 * We have a suspend ordering issue with the snd-hda driver also
2114 	 * requiring our device to be powered up. Due to the lack of a
2115 	 * parent/child relationship we currently solve this with a late
2116 	 * suspend hook.
2117 	 *
2118 	 * FIXME: This should be solved with a special hdmi sink device or
2119 	 * similar so that power domains can be employed.
2120 	 */
2121 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
2122 		return 0;
2123 
2124 	return i915_drm_suspend_late(&i915->drm, false);
2125 }
2126 
2127 static int i915_pm_poweroff_late(struct device *kdev)
2128 {
2129 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
2130 
2131 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
2132 		return 0;
2133 
2134 	return i915_drm_suspend_late(&i915->drm, true);
2135 }
2136 
2137 static int i915_pm_resume_early(struct device *kdev)
2138 {
2139 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
2140 
2141 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
2142 		return 0;
2143 
2144 	return i915_drm_resume_early(&i915->drm);
2145 }
2146 
2147 static int i915_pm_resume(struct device *kdev)
2148 {
2149 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
2150 
2151 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
2152 		return 0;
2153 
2154 	return i915_drm_resume(&i915->drm);
2155 }
2156 
2157 /* freeze: before creating the hibernation_image */
2158 static int i915_pm_freeze(struct device *kdev)
2159 {
2160 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
2161 	int ret;
2162 
2163 	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
2164 		ret = i915_drm_suspend(&i915->drm);
2165 		if (ret)
2166 			return ret;
2167 	}
2168 
2169 	ret = i915_gem_freeze(i915);
2170 	if (ret)
2171 		return ret;
2172 
2173 	return 0;
2174 }
2175 
2176 static int i915_pm_freeze_late(struct device *kdev)
2177 {
2178 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
2179 	int ret;
2180 
2181 	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
2182 		ret = i915_drm_suspend_late(&i915->drm, true);
2183 		if (ret)
2184 			return ret;
2185 	}
2186 
2187 	ret = i915_gem_freeze_late(i915);
2188 	if (ret)
2189 		return ret;
2190 
2191 	return 0;
2192 }
2193 
2194 /* thaw: called after creating the hibernation image, but before turning off. */
2195 static int i915_pm_thaw_early(struct device *kdev)
2196 {
2197 	return i915_pm_resume_early(kdev);
2198 }
2199 
2200 static int i915_pm_thaw(struct device *kdev)
2201 {
2202 	return i915_pm_resume(kdev);
2203 }
2204 
2205 /* restore: called after loading the hibernation image. */
2206 static int i915_pm_restore_early(struct device *kdev)
2207 {
2208 	return i915_pm_resume_early(kdev);
2209 }
2210 
2211 static int i915_pm_restore(struct device *kdev)
2212 {
2213 	return i915_pm_resume(kdev);
2214 }
2215 
2216 /*
2217  * Save all Gunit registers that may be lost after a D3 and a subsequent
2218  * S0i[R123] transition. The list of registers needing a save/restore is
2219  * defined in the VLV2_S0IXRegs document. This document marks all Gunit
2220  * registers in the following way:
2221  * - Driver: saved/restored by the driver
2222  * - Punit : saved/restored by the Punit firmware
2223  * - No, w/o marking: no need to save/restore, since the register is R/O or
2224  *                    used internally by the HW in a way that doesn't depend
2225  *                    on keeping the content across a suspend/resume.
2226  * - Debug : used for debugging
2227  *
2228  * We save/restore all registers marked with 'Driver', with the following
2229  * exceptions:
2230  * - Registers out of use, including also registers marked with 'Debug'.
2231  *   These have no effect on the driver's operation, so we don't save/restore
2232  *   them to reduce the overhead.
2233  * - Registers that are fully setup by an initialization function called from
2234  *   the resume path. For example many clock gating and RPS/RC6 registers.
2235  * - Registers that provide the right functionality with their reset defaults.
2236  *
2237  * TODO: Except for registers that, based on the above 3 criteria, can be safely
2238  * ignored, we save/restore all others, practically treating the HW context as
2239  * a black-box for the driver. Further investigation is needed to reduce the
2240  * saved/restored registers even further, by following the same 3 criteria.
2241  */
2242 static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
2243 {
2244 	struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
2245 	int i;
2246 
2247 	if (!s)
2248 		return;
2249 
2250 	/* GAM 0x4000-0x4770 */
2251 	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
2252 	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
2253 	s->arb_mode		= I915_READ(ARB_MODE);
2254 	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
2255 	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);
2256 
2257 	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
2258 		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
2259 
2260 	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
2261 	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);
2262 
2263 	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
2264 	s->ecochk		= I915_READ(GAM_ECOCHK);
2265 	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
2266 	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);
2267 
2268 	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);
2269 
2270 	/* MBC 0x9024-0x91D0, 0x8500 */
2271 	s->g3dctl		= I915_READ(VLV_G3DCTL);
2272 	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
2273 	s->mbctl		= I915_READ(GEN6_MBCTL);
2274 
2275 	/* GCP 0x9400-0x9424, 0x8100-0x810C */
2276 	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
2277 	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
2278 	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
2279 	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
2280 	s->rstctl		= I915_READ(GEN6_RSTCTL);
2281 	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);
2282 
2283 	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
2284 	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
2285 	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
2286 	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
2287 	s->ecobus		= I915_READ(ECOBUS);
2288 	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
2289 	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
2290 	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
2291 	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
2292 	s->rcedata		= I915_READ(VLV_RCEDATA);
2293 	s->spare2gh		= I915_READ(VLV_SPAREG2H);
2294 
2295 	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
2296 	s->gt_imr		= I915_READ(GTIMR);
2297 	s->gt_ier		= I915_READ(GTIER);
2298 	s->pm_imr		= I915_READ(GEN6_PMIMR);
2299 	s->pm_ier		= I915_READ(GEN6_PMIER);
2300 
2301 	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
2302 		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
2303 
2304 	/* GT SA CZ domain, 0x100000-0x138124 */
2305 	s->tilectl		= I915_READ(TILECTL);
2306 	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
2307 	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
2308 	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2309 	s->pmwgicz		= I915_READ(VLV_PMWGICZ);
2310 
2311 	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
2312 	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
2313 	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
2314 	s->pcbr			= I915_READ(VLV_PCBR);
2315 	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);
2316 
2317 	/*
2318 	 * Not saving any of:
2319 	 * DFT,		0x9800-0x9EC0
2320 	 * SARB,	0xB000-0xB1FC
2321 	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
2322 	 * PCI CFG
2323 	 */
2324 }
2325 
2326 static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
2327 {
2328 	struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
2329 	u32 val;
2330 	int i;
2331 
2332 	if (!s)
2333 		return;
2334 
2335 	/* GAM 0x4000-0x4770 */
2336 	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
2337 	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
2338 	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
2339 	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
2340 	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);
2341 
2342 	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
2343 		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
2344 
2345 	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
2346 	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
2347 
2348 	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
2349 	I915_WRITE(GAM_ECOCHK,		s->ecochk);
2350 	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
2351 	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);
2352 
2353 	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);
2354 
2355 	/* MBC 0x9024-0x91D0, 0x8500 */
2356 	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
2357 	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
2358 	I915_WRITE(GEN6_MBCTL,		s->mbctl);
2359 
2360 	/* GCP 0x9400-0x9424, 0x8100-0x810C */
2361 	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
2362 	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
2363 	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
2364 	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
2365 	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
2366 	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);
2367 
2368 	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
2369 	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
2370 	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
2371 	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
2372 	I915_WRITE(ECOBUS,		s->ecobus);
2373 	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
2374 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
2375 	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
2376 	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
2377 	I915_WRITE(VLV_RCEDATA,		s->rcedata);
2378 	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);
2379 
2380 	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
2381 	I915_WRITE(GTIMR,		s->gt_imr);
2382 	I915_WRITE(GTIER,		s->gt_ier);
2383 	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
2384 	I915_WRITE(GEN6_PMIER,		s->pm_ier);
2385 
2386 	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
2387 		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
2388 
2389 	/* GT SA CZ domain, 0x100000-0x138124 */
2390 	I915_WRITE(TILECTL,			s->tilectl);
2391 	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
2392 	/*
2393 	 * Preserve the GT allow wake and GFX force clock bit, they are not
2394 	 * to be restored, as they are used to control the s0ix suspend/resume
2395 	 * sequence by the caller.
2396 	 */
2397 	val = I915_READ(VLV_GTLC_WAKE_CTRL);
2398 	val &= VLV_GTLC_ALLOWWAKEREQ;
2399 	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
2400 	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
2401 
2402 	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2403 	val &= VLV_GFX_CLK_FORCE_ON_BIT;
2404 	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
2405 	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
2406 
2407 	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);
2408 
2409 	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
2410 	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
2411 	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
2412 	I915_WRITE(VLV_PCBR,			s->pcbr);
2413 	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
2414 }
2415 
2416 static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
2417 				  u32 mask, u32 val)
2418 {
2419 	i915_reg_t reg = VLV_GTLC_PW_STATUS;
2420 	u32 reg_value;
2421 	int ret;
2422 
2423 	/* The HW does not like us polling for PW_STATUS frequently, so
2424 	 * use the sleeping loop rather than risk the busy spin within
2425 	 * intel_wait_for_register().
2426 	 *
2427 	 * Transitioning between RC6 states should be at most 2ms (see
2428 	 * valleyview_enable_rps) so use a 3ms timeout.
2429 	 */
2430 	ret = wait_for(((reg_value =
2431 			 intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
2432 		       == val, 3);
2433 
2434 	/* just trace the final value */
2435 	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
2436 
2437 	return ret;
2438 }
2439 
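/*
 * Force the gfx clock on (or release the force) so that Gunit registers
 * stay accessible while GT wake is disallowed during the S0ix
 * save/restore sequence. When forcing the clock on, wait up to 20ms for
 * the status bit to acknowledge it.
 */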
2440 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
2441 {
2442 	u32 val;
2443 	int err;
2444 
2445 	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2446 	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
2447 	if (force_on)
2448 		val |= VLV_GFX_CLK_FORCE_ON_BIT;
2449 	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
2450 
2451 	if (!force_on)
2452 		return 0;
2453 
2454 	err = intel_wait_for_register(&dev_priv->uncore,
2455 				      VLV_GTLC_SURVIVABILITY_REG,
2456 				      VLV_GFX_CLK_STATUS_BIT,
2457 				      VLV_GFX_CLK_STATUS_BIT,
2458 				      20);
2459 	if (err)
2460 		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
2461 			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
2462 
2463 	return err;
2464 }
2465 
2466 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
2467 {
2468 	u32 mask;
2469 	u32 val;
2470 	int err;
2471 
2472 	val = I915_READ(VLV_GTLC_WAKE_CTRL);
2473 	val &= ~VLV_GTLC_ALLOWWAKEREQ;
2474 	if (allow)
2475 		val |= VLV_GTLC_ALLOWWAKEREQ;
2476 	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
2477 	POSTING_READ(VLV_GTLC_WAKE_CTRL);
2478 
2479 	mask = VLV_GTLC_ALLOWWAKEACK;
2480 	val = allow ? mask : 0;
2481 
2482 	err = vlv_wait_for_pw_status(dev_priv, mask, val);
2483 	if (err)
2484 		DRM_ERROR("timeout disabling GT waking\n");
2485 
2486 	return err;
2487 }
2488 
2489 static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
2490 				  bool wait_for_on)
2491 {
2492 	u32 mask;
2493 	u32 val;
2494 
2495 	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
2496 	val = wait_for_on ? mask : 0;
2497 
2498 	/*
2499 	 * RC6 transitioning can be delayed up to 2 msec (see
2500 	 * valleyview_enable_rps), so use 3 msec for safety.
2501 	 *
2502 	 * This can fail to turn off the rc6 if the GPU is stuck after a failed
2503 	 * reset and we are trying to force the machine to sleep.
2504 	 */
2505 	if (vlv_wait_for_pw_status(dev_priv, mask, val))
2506 		DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
2507 				 onoff(wait_for_on));
2508 }
2509 
2510 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
2511 {
2512 	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
2513 		return;
2514 
2515 	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
2516 	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
2517 }
2518 
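/*
 * S0ix entry sequence for VLV/CHV: wait for the GT power wells to turn
 * off, force the gfx clock on, disallow GT wake, save the Gunit state
 * and then release the forced clock. On failure, re-allow GT wake and
 * drop the clock force so the device is left in a sane state.
 */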
2519 static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
2520 {
2521 	u32 mask;
2522 	int err;
2523 
2524 	/*
2525 	 * Bspec defines the following GT well on/off status flags as debug
2526 	 * only, so don't treat them as hard failures.
2527 	 */
2528 	vlv_wait_for_gt_wells(dev_priv, false);
2529 
2530 	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
2531 	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
2532 
2533 	vlv_check_no_gt_access(dev_priv);
2534 
2535 	err = vlv_force_gfx_clock(dev_priv, true);
2536 	if (err)
2537 		goto err1;
2538 
2539 	err = vlv_allow_gt_wake(dev_priv, false);
2540 	if (err)
2541 		goto err2;
2542 
2543 	vlv_save_gunit_s0ix_state(dev_priv);
2544 
2545 	err = vlv_force_gfx_clock(dev_priv, false);
2546 	if (err)
2547 		goto err2;
2548 
2549 	return 0;
2550 
2551 err2:
2552 	/* For safety always re-enable waking and disable gfx clock forcing */
2553 	vlv_allow_gt_wake(dev_priv, true);
2554 err1:
2555 	vlv_force_gfx_clock(dev_priv, false);
2556 
2557 	return err;
2558 }
2559 
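/*
 * Reverse of vlv_suspend_complete(): force the gfx clock on, restore the
 * saved Gunit state, re-allow GT wake and release the clock force,
 * reporting the first error encountered while still attempting every
 * step.
 */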
2560 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
2561 				bool rpm_resume)
2562 {
2563 	int err;
2564 	int ret;
2565 
2566 	/*
2567 	 * If any of the steps fail, just try to continue; that's the best we
2568 	 * can do at this point. Return the first error code (which will also
2569 	 * leave RPM permanently disabled).
2570 	 */
2571 	ret = vlv_force_gfx_clock(dev_priv, true);
2572 
2573 	vlv_restore_gunit_s0ix_state(dev_priv);
2574 
2575 	err = vlv_allow_gt_wake(dev_priv, true);
2576 	if (!ret)
2577 		ret = err;
2578 
2579 	err = vlv_force_gfx_clock(dev_priv, false);
2580 	if (!ret)
2581 		ret = err;
2582 
2583 	vlv_check_no_gt_access(dev_priv);
2584 
2585 	if (rpm_resume)
2586 		intel_init_clock_gating(dev_priv);
2587 
2588 	return ret;
2589 }
2590 
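/*
 * Runtime (S0ix) suspend entry point. Runtime PM is only usable when RC6
 * is enabled and the platform advertises HAS_RUNTIME_PM(); otherwise the
 * request is refused with -ENODEV.
 */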
2591 static int intel_runtime_suspend(struct device *kdev)
2592 {
2593 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
2594 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2595 	int ret = 0;
2596 
2597 	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
2598 		return -ENODEV;
2599 
2600 	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
2601 		return -ENODEV;
2602 
2603 	DRM_DEBUG_KMS("Suspending device\n");
2604 
2605 	disable_rpm_wakeref_asserts(rpm);
2606 
2607 	/*
2608 	 * We are safe here against re-faults, since the fault handler takes
2609 	 * an RPM reference.
2610 	 */
2611 	i915_gem_runtime_suspend(dev_priv);
2612 
2613 	intel_gt_runtime_suspend(&dev_priv->gt);
2614 
2615 	intel_runtime_pm_disable_interrupts(dev_priv);
2616 
2617 	intel_uncore_suspend(&dev_priv->uncore);
2618 
2619 	intel_display_power_suspend(dev_priv);
2620 
2621 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2622 		ret = vlv_suspend_complete(dev_priv);
2623 
2624 	if (ret) {
2625 		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
2626 		intel_uncore_runtime_resume(&dev_priv->uncore);
2627 
2628 		intel_runtime_pm_enable_interrupts(dev_priv);
2629 
2630 		intel_gt_runtime_resume(&dev_priv->gt);
2631 
2632 		i915_gem_restore_fences(dev_priv);
2633 
2634 		enable_rpm_wakeref_asserts(rpm);
2635 
2636 		return ret;
2637 	}
2638 
2639 	enable_rpm_wakeref_asserts(rpm);
2640 	intel_runtime_pm_driver_release(rpm);
2641 
2642 	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
2643 		DRM_ERROR("Unclaimed access detected prior to suspending\n");
2644 
2645 	rpm->suspended = true;
2646 
2647 	/*
2648 	 * FIXME: We really should find a document that references the arguments
2649 	 * used below!
2650 	 */
2651 	if (IS_BROADWELL(dev_priv)) {
2652 		/*
2653 		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
2654 		 * being detected, and the call we do at intel_runtime_resume()
2655 		 * won't be able to restore them. Since PCI_D3hot matches the
2656 		 * actual specification and appears to be working, use it.
2657 		 */
2658 		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
2659 	} else {
2660 		/*
2661 		 * current versions of firmware which depend on this opregion
2662 		 * notification have repurposed the D1 definition to mean
2663 		 * "runtime suspended" vs. what you would normally expect (D3)
2664 		 * to distinguish it from notifications that might be sent via
2665 		 * the suspend path.
2666 		 */
2667 		intel_opregion_notify_adapter(dev_priv, PCI_D1);
2668 	}
2669 
2670 	assert_forcewakes_inactive(&dev_priv->uncore);
2671 
2672 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
2673 		intel_hpd_poll_init(dev_priv);
2674 
2675 	DRM_DEBUG_KMS("Device suspended\n");
2676 	return 0;
2677 }
2678 
2679 static int intel_runtime_resume(struct device *kdev)
2680 {
2681 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
2682 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2683 	int ret = 0;
2684 
2685 	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
2686 		return -ENODEV;
2687 
2688 	DRM_DEBUG_KMS("Resuming device\n");
2689 
2690 	WARN_ON_ONCE(atomic_read(&rpm->wakeref_count));
2691 	disable_rpm_wakeref_asserts(rpm);
2692 
2693 	intel_opregion_notify_adapter(dev_priv, PCI_D0);
2694 	rpm->suspended = false;
2695 	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
2696 		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
2697 
2698 	intel_display_power_resume(dev_priv);
2699 
2700 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2701 		ret = vlv_resume_prepare(dev_priv, true);
2702 
2703 	intel_uncore_runtime_resume(&dev_priv->uncore);
2704 
2705 	intel_runtime_pm_enable_interrupts(dev_priv);
2706 
2707 	/*
2708 	 * No point in rolling back things in case of an error, as the best
2709 	 * we can do is to hope that things will still work (and disable RPM).
2710 	 */
2711 	intel_gt_runtime_resume(&dev_priv->gt);
2712 	i915_gem_restore_fences(dev_priv);
2713 
2714 	/*
2715 	 * On VLV/CHV display interrupts are part of the display
2716 	 * power well, so hpd is reinitialized from there. For
2717 	 * everyone else do it here.
2718 	 */
2719 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
2720 		intel_hpd_init(dev_priv);
2721 
2722 	intel_enable_ipc(dev_priv);
2723 
2724 	enable_rpm_wakeref_asserts(rpm);
2725 
2726 	if (ret)
2727 		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
2728 	else
2729 		DRM_DEBUG_KMS("Device resumed\n");
2730 
2731 	return ret;
2732 }
2733 
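/*
 * System and runtime PM callbacks for the device, intended to be hooked
 * up as the device's dev_pm_ops by the PCI driver registration (see
 * i915_pci.c). A minimal sketch of how such a table is typically wired
 * up, with the "example_*" names purely illustrative:
 *
 *	static struct pci_driver example_pci_driver = {
 *		.name	   = "example",
 *		.probe	   = example_probe,
 *		.remove	   = example_remove,
 *		.driver.pm = &i915_pm_ops,
 *	};
 */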
2734 const struct dev_pm_ops i915_pm_ops = {
2735 	/*
2736 	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
2737 	 * PMSG_RESUME]
2738 	 */
2739 	.prepare = i915_pm_prepare,
2740 	.suspend = i915_pm_suspend,
2741 	.suspend_late = i915_pm_suspend_late,
2742 	.resume_early = i915_pm_resume_early,
2743 	.resume = i915_pm_resume,
2744 
2745 	/*
2746 	 * S4 event handlers
2747 	 * @freeze, @freeze_late    : called (1) before creating the
2748 	 *                            hibernation image [PMSG_FREEZE] and
2749 	 *                            (2) after rebooting, before restoring
2750 	 *                            the image [PMSG_QUIESCE]
2751 	 * @thaw, @thaw_early       : called (1) after creating the hibernation
2752 	 *                            image, before writing it [PMSG_THAW]
2753 	 *                            and (2) after failing to create or
2754 	 *                            restore the image [PMSG_RECOVER]
2755 	 * @poweroff, @poweroff_late: called after writing the hibernation
2756 	 *                            image, before rebooting [PMSG_HIBERNATE]
2757 	 * @restore, @restore_early : called after rebooting and restoring the
2758 	 *                            hibernation image [PMSG_RESTORE]
2759 	 */
2760 	.freeze = i915_pm_freeze,
2761 	.freeze_late = i915_pm_freeze_late,
2762 	.thaw_early = i915_pm_thaw_early,
2763 	.thaw = i915_pm_thaw,
2764 	.poweroff = i915_pm_suspend,
2765 	.poweroff_late = i915_pm_poweroff_late,
2766 	.restore_early = i915_pm_restore_early,
2767 	.restore = i915_pm_restore,
2768 
2769 	/* S0ix (via runtime suspend) event handlers */
2770 	.runtime_suspend = intel_runtime_suspend,
2771 	.runtime_resume = intel_runtime_resume,
2772 };
2773 
2774 static const struct vm_operations_struct i915_gem_vm_ops = {
2775 	.fault = i915_gem_fault,
2776 	.open = drm_gem_vm_open,
2777 	.close = drm_gem_vm_close,
2778 };
2779 
2780 static const struct file_operations i915_driver_fops = {
2781 	.owner = THIS_MODULE,
2782 	.open = drm_open,
2783 	.release = drm_release,
2784 	.unlocked_ioctl = drm_ioctl,
2785 	.mmap = drm_gem_mmap,
2786 	.poll = drm_poll,
2787 	.read = drm_read,
2788 	.compat_ioctl = i915_compat_ioctl,
2789 	.llseek = noop_llseek,
2790 };
2791 
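/*
 * The legacy GEM pin/unpin ioctls are no longer supported; reject them
 * unconditionally so old userspace gets a clear -ENODEV.
 */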
2792 static int
2793 i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
2794 			  struct drm_file *file)
2795 {
2796 	return -ENODEV;
2797 }
2798 
2799 static const struct drm_ioctl_desc i915_ioctls[] = {
2800 	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2801 	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
2802 	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
2803 	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
2804 	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
2805 	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
2806 	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
2807 	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2808 	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
2809 	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
2810 	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2811 	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
2812 	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2813 	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2814 	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
2815 	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
2816 	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2817 	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2818 	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
2819 	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
2820 	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
2821 	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
2822 	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
2823 	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
2824 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
2825 	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
2826 	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2827 	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2828 	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
2829 	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
2830 	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
2831 	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
2832 	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
2833 	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
2834 	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
2835 	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
2836 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
2837 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
2838 	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
2839 	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
2840 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
2841 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
2842 	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
2843 	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
2844 	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
2845 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
2846 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
2847 	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
2848 	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
2849 	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
2850 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
2851 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
2852 	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
2853 	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
2854 	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
2855 	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
2856 	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
2857 	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
2858 };
2859 
2860 static struct drm_driver driver = {
2861 	/* Don't use MTRRs here; the Xserver or userspace app should
2862 	 * deal with them for Intel hardware.
2863 	 */
2864 	.driver_features =
2865 	    DRIVER_GEM |
2866 	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
2867 	.release = i915_driver_release,
2868 	.open = i915_driver_open,
2869 	.lastclose = i915_driver_lastclose,
2870 	.postclose = i915_driver_postclose,
2871 
2872 	.gem_close_object = i915_gem_close_object,
2873 	.gem_free_object_unlocked = i915_gem_free_object,
2874 	.gem_vm_ops = &i915_gem_vm_ops,
2875 
2876 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
2877 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
2878 	.gem_prime_export = i915_gem_prime_export,
2879 	.gem_prime_import = i915_gem_prime_import,
2880 
2881 	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
2882 	.get_scanout_position = i915_get_crtc_scanoutpos,
2883 
2884 	.dumb_create = i915_gem_dumb_create,
2885 	.dumb_map_offset = i915_gem_mmap_gtt,
2886 	.ioctls = i915_ioctls,
2887 	.num_ioctls = ARRAY_SIZE(i915_ioctls),
2888 	.fops = &i915_driver_fops,
2889 	.name = DRIVER_NAME,
2890 	.desc = DRIVER_DESC,
2891 	.date = DRIVER_DATE,
2892 	.major = DRIVER_MAJOR,
2893 	.minor = DRIVER_MINOR,
2894 	.patchlevel = DRIVER_PATCHLEVEL,
2895 };
2896 
2897 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2898 #include "selftests/mock_drm.c"
2899 #endif
2900