/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/stackdepot.h>

#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
#include <drm/drm_dsc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>

#include "i915_fixed.h"
#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"

#include "display/intel_bios.h"
#include "display/intel_display.h"
#include "display/intel_display_power.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_frontbuffer.h"
#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"

#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"

#include "gt/intel_lrc.h"
#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"

#include "intel_device_info.h"
#include "intel_pch.h"
#include "intel_runtime_pm.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_wopcm.h"

#include "i915_gem.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "gt/intel_timeline.h"
#include "i915_vma.h"
#include "i915_irq.h"

#include "intel_gvt.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20190822"
#define DRIVER_TIMESTAMP	1566477988

struct drm_i915_gem_object;

enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_PORT_F,
	HPD_PORT_G,
	HPD_PORT_H,
	HPD_PORT_I,

	HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
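
/*
 * Usage sketch (illustrative only, not part of the driver): walk every
 * hotplug pin, e.g. to reset the per-pin storm statistics kept in
 * struct i915_hotplug below. The dev_priv pointer is assumed to be in
 * scope.
 *
 *	enum hpd_pin pin;
 *
 *	for_each_hpd_pin(pin)
 *		dev_priv->hotplug.stats[pin].count = 0;
 */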

/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50

struct i915_hotplug {
	struct delayed_work hotplug_work;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	u32 retry_bits;
	struct delayed_work reenable_work;

	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	unsigned int hpd_storm_threshold;
	/* Whether or not to count short HPD IRQs in HPD storms */
	u8 hpd_short_storm_enabled;

	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP HPD could block the workqueue while trying to grab the
	 * mode config mutex, which userspace may already hold. Meanwhile
	 * userspace is waiting on the DP workqueue to run, which is
	 * blocked behind the non-DP one.
	 */
	struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;
		struct list_head request_list;
	} mm;

	struct idr context_idr;
	struct mutex context_idr_lock; /* guards context_idr */

	struct idr vm_idr;
	struct mutex vm_idr_lock; /* guards vm_idr */

	unsigned int bsd_engine;

/*
 * Every context ban increments the per-client ban score. Hangs in
 * short succession also increment the ban score. If the ban threshold
 * is reached, the client is considered banned and submitting more work
 * will fail. This is a stop-gap measure to limit badly behaving
 * clients' access to the gpu. Note that unbannable contexts never
 * increment the client ban score.
 */
#define I915_CLIENT_SCORE_HANG_FAST	1
#define   I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
#define I915_CLIENT_SCORE_CONTEXT_BAN   3
#define I915_CLIENT_SCORE_BANNED	9
	/** ban_score: Accumulated score of all ctx bans and fast hangs. */
	atomic_t ban_score;
	unsigned long hang_timestamp;
};
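
/*
 * Worked example (illustrative): a client is considered banned once its
 * ban_score reaches I915_CLIENT_SCORE_BANNED (9), e.g. after three
 * context bans (3 * 3), or after two context bans plus three fast hangs
 * landing within I915_CLIENT_FAST_HANG_JIFFIES of one another
 * (2 * 3 + 3 * 1).
 */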

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

struct intel_overlay;
struct intel_overlay_error_state;

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_connector;
struct intel_encoder;
struct intel_atomic_state;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
struct intel_cdclk_state;

struct drm_i915_display_funcs {
	void (*get_cdclk)(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_state *cdclk_state);
	void (*set_cdclk)(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_state *cdclk_state,
			  enum pipe pipe);
	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
			     enum i9xx_plane_id i9xx_plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
	int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
	void (*initial_watermarks)(struct intel_atomic_state *state,
				   struct intel_crtc_state *crtc_state);
	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
					 struct intel_crtc_state *crtc_state);
	void (*optimize_watermarks)(struct intel_atomic_state *state,
				    struct intel_crtc_state *crtc_state);
	int (*compute_global_watermarks)(struct intel_atomic_state *state);
	void (*update_wm)(struct intel_crtc *crtc);
	int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
			    struct intel_atomic_state *old_state);
	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
			     struct intel_atomic_state *old_state);
	void (*update_crtcs)(struct intel_atomic_state *state);
	void (*audio_codec_enable)(struct intel_encoder *encoder,
				   const struct intel_crtc_state *crtc_state,
				   const struct drm_connector_state *conn_state);
	void (*audio_codec_disable)(struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state);
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	int (*color_check)(struct intel_crtc_state *crtc_state);
	/*
	 * Program double buffered color management registers during
	 * vblank evasion. The registers should then latch during the
	 * next vblank start, alongside any other double buffered registers
	 * involved with the same commit.
	 */
	void (*color_commit)(const struct intel_crtc_state *crtc_state);
	/*
	 * Load LUTs (and other single buffered color management
	 * registers). Will (hopefully) be called during the vblank
	 * following the latching of any double buffered registers
	 * involved with the same commit.
	 */
	void (*load_luts)(const struct intel_crtc_state *crtc_state);
	void (*read_luts)(struct intel_crtc_state *crtc_state);
};

struct intel_csr {
	struct work_struct work;
	const char *fw_path;
	u32 required_version;
	u32 max_fw_size; /* bytes */
	u32 *dmc_payload;
	u32 dmc_fw_size; /* dwords */
	u32 version;
	u32 mmio_count;
	i915_reg_t mmioaddr[20];
	u32 mmiodata[20];
	u32 dc_state;
	u32 allowed_dc_mask;
	intel_wakeref_t wakeref;
};

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */

struct intel_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock. */
	struct mutex lock;
	unsigned threshold;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	unsigned int visible_pipes_mask;
	struct intel_crtc *crtc;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	bool enabled;
	bool active;
	bool flip_pending;

	bool underrun_detected;
	struct work_struct underrun_work;

	/*
	 * Due to the atomic rules we can't access some structures without the
	 * appropriate locking, so we cache information here in order to avoid
	 * these problems.
	 */
	struct intel_fbc_state_cache {
		struct i915_vma *vma;
		unsigned long flags;

		struct {
			unsigned int mode_flags;
			u32 hsw_bdw_pixel_rate;
		} crtc;

		struct {
			unsigned int rotation;
			int src_w;
			int src_h;
			bool visible;
			/*
			 * Display surface base address adjustment for
			 * pageflips. Note that on gen4+ this only adjusts up
			 * to a tile, offsets within a tile are handled in
			 * the hw itself (with the TILEOFF register).
			 */
			int adjusted_x;
			int adjusted_y;

			int y;

			u16 pixel_blend_mode;
		} plane;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
		} fb;
	} state_cache;

	/*
	 * This structure contains everything that's relevant to program the
	 * hardware registers. When we want to figure out if we need to disable
	 * and re-enable FBC for a new configuration we just check if there's
	 * something different in the struct. The genx_fbc_activate functions
	 * are supposed to read from it in order to program the registers.
	 */
	struct intel_fbc_reg_params {
		struct i915_vma *vma;
		unsigned long flags;

		struct {
			enum pipe pipe;
			enum i9xx_plane_id i9xx_plane;
			unsigned int fence_y_offset;
		} crtc;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
		} fb;

		int cfb_size;
		unsigned int gen9_wa_cfb_stride;
	} params;

	const char *no_fbc_reason;
};

/*
 * HIGH_RR is the highest eDP panel refresh rate read from EDID.
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for the same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct mutex mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct mutex lock;

#define I915_PSR_DEBUG_MODE_MASK	0x0f
#define I915_PSR_DEBUG_DEFAULT		0x00
#define I915_PSR_DEBUG_DISABLE		0x01
#define I915_PSR_DEBUG_ENABLE		0x02
#define I915_PSR_DEBUG_FORCE_PSR1	0x03
#define I915_PSR_DEBUG_IRQ		0x10

	u32 debug;
	bool sink_support;
	bool enabled;
	struct intel_dp *dp;
	enum pipe pipe;
	bool active;
	struct work_struct work;
	unsigned busy_frontbuffer_bits;
	bool sink_psr2_support;
	bool link_standby;
	bool colorimetry_support;
	bool psr2_enabled;
	u8 sink_sync_latency;
	ktime_t last_entry_attempt;
	ktime_t last_exit;
	bool sink_not_reliable;
	bool irq_aux_error;
	u16 su_x_granularity;
};

#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	u64 saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state;

struct intel_rps_ei {
	ktime_t ktime;
	u32 render_c0;
	u32 media_c0;
};

struct intel_rps {
	struct mutex lock; /* protects enabling and the worker */

	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* PM interrupt bits that should never be masked */
	u32 pm_intrmsk_mbz;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 boost_freq;		/* Frequency to request when wait boosting */
	u8 idle_freq;		/* Frequency to request when we are idle */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */

	int last_adj;

	struct {
		struct mutex mutex;

		enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
		unsigned int interactive;

		u8 up_threshold; /* Current %busy required to upclock */
		u8 down_threshold; /* Current %busy required to downclock */
	} power;

	bool enabled;
	atomic_t num_waiters;
	atomic_t boosts;

	/* manual wa residency calculations */
	struct intel_rps_ei ei;
};
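
/*
 * Worked example (illustrative; the exact multiple is platform
 * dependent, and the 50 MHz step below is only an assumption matching
 * the common gen6+ case): with a 50 MHz multiple, cur_freq == 18 means
 * the GPU is clocked at 18 * 50 = 900 MHz. On vlv/chv the multiple is
 * instead derived from gpll_ref_freq.
 */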

struct intel_rc6 {
	bool enabled;
	bool ctx_corrupted;
	intel_wakeref_t ctx_corrupted_wakeref;
	u64 prev_hw_residency[4];
	u64 cur_residency[4];
};

struct intel_llc_pstate {
	bool enabled;
};

struct intel_gen6_power_mgmt {
	struct intel_rps rps;
	struct intel_rc6 rc6;
	struct intel_llc_pstate llc_pstate;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct mutex stolen_lock;

	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
	spinlock_t obj_lock;

	/**
	 * List of objects which are purgeable.
	 */
	struct list_head purge_list;

	/**
	 * List of objects which have allocated pages and are shrinkable.
	 */
	struct list_head shrink_list;

	/**
	 * List of objects which are pending destruction.
	 */
	struct llist_head free_list;
	struct work_struct free_work;
	/**
	 * Count of objects pending destruction. Used to skip needlessly
	 * waiting on an RCU barrier if no objects are waiting to be freed.
	 */
	atomic_t free_count;

	/**
	 * Small stash of WC pages
	 */
	struct pagestash wc_stash;

	/**
	 * tmpfs instance used for shmem backed objects
	 */
	struct vfsmount *gemfs;

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	/**
	 * Workqueue to fault in userptr pages, flushed by the execbuf
	 * when required but otherwise left to userspace to try again
	 * on EAGAIN.
	 */
	struct workqueue_struct *userptr_wq;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	/* shrinker accounting, also useful for userland debugging */
	u64 shrink_memory;
	u32 shrink_count;
};

#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */

#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */

#define I915_ENGINE_DEAD_TIMEOUT  (4 * HZ)  /* Seqno, head and subunits dead */
#define I915_SEQNO_DEAD_TIMEOUT   (12 * HZ) /* Seqno dead with active head */

#define I915_ENGINE_WEDGED_TIMEOUT  (60 * HZ)  /* Reset but no recovery? */

struct ddi_vbt_port_info {
	/* Non-NULL if port present. */
	const struct child_device_config *child;

	int max_tmds_clock;

	/*
	 * This is an index in the HDMI/DVI DDI buffer translation table.
	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
	 * populate this field.
	 */
#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
	u8 hdmi_level_shift;

	u8 supports_dvi:1;
	u8 supports_hdmi:1;
	u8 supports_dp:1;
	u8 supports_edp:1;
	u8 supports_typec_usb:1;
	u8 supports_tbt:1;

	u8 alternate_aux_channel;
	u8 alternate_ddc_pin;

	u8 dp_boost_level;
	u8 hdmi_boost_level;
	int dp_max_link_rate;		/* 0 for not limited by VBT */
};

enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int int_lvds_support:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int panel_type:4;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
	enum drm_panel_orientation orientation;

	enum drrs_support_type drrs_type;

	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;
		bool low_vswing;
		bool initialized;
		int bpp;
		struct edp_power_seq pps;
	} edp;

	struct {
		bool enable;
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time_us;
		int tp2_tp3_wakeup_time_us;
		int psr2_tp2_tp3_wakeup_time_us;
	} psr;

	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
		u8 controller;		/* brightness controller number */
		enum intel_backlight_type type;
	} backlight;

	/* MIPI DSI */
	struct {
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u16 bl_ports;
		u16 cabc_ports;
		u8 seq_version;
		u32 size;
		u8 *data;
		const u8 *sequence[MIPI_SEQ_MAX];
		u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
		enum drm_panel_orientation orientation;
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	struct child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
	struct sdvo_device_mapping sdvo_mappings[2];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	u32 pri_val;
	u32 spr_val;
	u32 cur_val;
	u32 fbc_val;
};

struct ilk_wm_values {
	u32 wm_pipe[3];
	u32 wm_lp[3];
	u32 wm_lp_spr[3];
	u32 wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

struct g4x_pipe_wm {
	u16 plane[I915_MAX_PLANES];
	u16 fbc;
};

struct g4x_sr_wm {
	u16 plane;
	u16 cursor;
	u16 fbc;
};

struct vlv_wm_ddl_values {
	u8 plane[I915_MAX_PLANES];
};

struct vlv_wm_values {
	struct g4x_pipe_wm pipe[3];
	struct g4x_sr_wm sr;
	struct vlv_wm_ddl_values ddl[3];
	u8 level;
	bool cxsr;
};

struct g4x_wm_values {
	struct g4x_pipe_wm pipe[2];
	struct g4x_sr_wm sr;
	struct g4x_sr_wm hpll;
	bool cxsr;
	bool hpll_en;
	bool fbc_en;
};

struct skl_ddb_entry {
	u16 start, end;	/* in number of blocks, 'end' is exclusive */
};

static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	if (e1->start == e2->start && e1->end == e2->end)
		return true;

	return false;
}
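
/*
 * Usage sketch (illustrative; the entry names are hypothetical): a
 * plane's allocation spans skl_ddb_entry_size(&entry) blocks, and
 * reprogramming can be skipped when the allocation is unchanged:
 *
 *	if (skl_ddb_entry_equal(&old_entry, &new_entry))
 *		return;
 */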

struct skl_ddb_allocation {
	u8 enabled_slices; /* GEN11 has 2 configurable slices */
};

struct skl_ddb_values {
	unsigned dirty_pipes;
	struct skl_ddb_allocation ddb;
};

struct skl_wm_level {
	u16 min_ddb_alloc;
	u16 plane_res_b;
	u8 plane_res_l;
	bool plane_en;
	bool ignore_lines;
};

/* Stores plane specific WM parameters */
struct skl_wm_params {
	bool x_tiled, y_tiled;
	bool rc_surface;
	bool is_planar;
	u32 width;
	u8 cpp;
	u32 plane_pixel_rate;
	u32 y_min_scanlines;
	u32 plane_bytes_per_line;
	uint_fixed_16_16_t plane_blocks_per_line;
	uint_fixed_16_16_t y_tile_minimum;
	u32 linetime_us;
	u32 dbuf_block_size;
};

enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PLANE3,
	INTEL_PIPE_CRC_SOURCE_PLANE4,
	INTEL_PIPE_CRC_SOURCE_PLANE5,
	INTEL_PIPE_CRC_SOURCE_PLANE6,
	INTEL_PIPE_CRC_SOURCE_PLANE7,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	spinlock_t lock;
	int skipped;
	enum intel_pipe_crc_source source;
};

struct i915_frontbuffer_tracking {
	spinlock_t lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
	 * scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_virtual_gpu {
	struct mutex lock; /* serialises sending of g2v_notify command pkts */
	bool active;
	u32 caps;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

struct i915_oa_format {
	u32 format;
	int size;
};

struct i915_oa_reg {
	i915_reg_t addr;
	u32 value;
};

struct i915_oa_config {
	char uuid[UUID_STRING_LEN + 1];
	int id;

	const struct i915_oa_reg *mux_regs;
	u32 mux_regs_len;
	const struct i915_oa_reg *b_counter_regs;
	u32 b_counter_regs_len;
	const struct i915_oa_reg *flex_regs;
	u32 flex_regs_len;

	struct attribute_group sysfs_metric;
	struct attribute *attrs[2];
	struct device_attribute sysfs_metric_id;

	atomic_t ref_count;
};

struct i915_perf_stream;

/**
 * struct i915_perf_stream_ops - the OPs to support a specific stream type
 */
struct i915_perf_stream_ops {
	/**
	 * @enable: Enables the collection of HW samples, either in response to
	 * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
	 * without `I915_PERF_FLAG_DISABLED`.
	 */
	void (*enable)(struct i915_perf_stream *stream);

	/**
	 * @disable: Disables the collection of HW samples, either in response
	 * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
	 * the stream.
	 */
	void (*disable)(struct i915_perf_stream *stream);

	/**
	 * @poll_wait: Call poll_wait, passing a wait queue that will be woken
	 * once there is something ready to read() for the stream
	 */
	void (*poll_wait)(struct i915_perf_stream *stream,
			  struct file *file,
			  poll_table *wait);

	/**
	 * @wait_unlocked: For handling a blocking read, wait until there is
	 * something ready to read() for the stream. E.g. wait on the same
	 * wait queue that would be passed to poll_wait().
	 */
	int (*wait_unlocked)(struct i915_perf_stream *stream);

	/**
	 * @read: Copy buffered metrics as records to userspace
	 * **buf**: the userspace, destination buffer
	 * **count**: the number of bytes to copy, requested by userspace
	 * **offset**: zero at the start of the read, updated as the read
	 * proceeds, it represents how many bytes have been copied so far and
	 * the buffer offset for copying the next record.
	 *
	 * Copy as many buffered i915 perf samples and records for this stream
	 * to userspace as will fit in the given buffer.
	 *
	 * Only write complete records; returning -%ENOSPC if there isn't room
	 * for a complete record.
	 *
	 * Return any error condition that results in a short read such as
	 * -%ENOSPC or -%EFAULT, even though these may be squashed before
	 * returning to userspace.
	 */
	int (*read)(struct i915_perf_stream *stream,
		    char __user *buf,
		    size_t count,
		    size_t *offset);

	/**
	 * @destroy: Cleanup any stream specific resources.
	 *
	 * The stream will always be disabled before this is called.
	 */
	void (*destroy)(struct i915_perf_stream *stream);
};

/**
 * struct i915_perf_stream - state for a single open stream FD
 */
struct i915_perf_stream {
	/**
	 * @dev_priv: i915 drm device
	 */
	struct drm_i915_private *dev_priv;

	/**
	 * @link: Links the stream into ``&drm_i915_private->streams``
	 */
	struct list_head link;

	/**
	 * @wakeref: As we keep the device awake while the perf stream is
	 * active, we track our runtime pm reference for later release.
	 */
	intel_wakeref_t wakeref;

	/**
	 * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
	 * properties given when opening a stream, representing the contents
	 * of a single sample as read() by userspace.
	 */
	u32 sample_flags;

	/**
	 * @sample_size: Considering the configured contents of a sample
	 * combined with the required header size, this is the total size
	 * of a single sample record.
	 */
	int sample_size;

	/**
	 * @ctx: %NULL if measuring system-wide across all contexts or a
	 * specific context that is being monitored.
	 */
	struct i915_gem_context *ctx;

	/**
	 * @enabled: Whether the stream is currently enabled, considering
	 * whether the stream was opened in a disabled state and based
	 * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
	 */
	bool enabled;

	/**
	 * @ops: The callbacks providing the implementation of this specific
	 * type of configured stream.
	 */
	const struct i915_perf_stream_ops *ops;

	/**
	 * @oa_config: The OA configuration used by the stream.
	 */
	struct i915_oa_config *oa_config;

	/**
	 * The OA context specific information.
	 */
	struct intel_context *pinned_ctx;
	u32 specific_ctx_id;
	u32 specific_ctx_id_mask;

	struct hrtimer poll_check_timer;
	wait_queue_head_t poll_wq;
	bool pollin;

	bool periodic;
	int period_exponent;

	/**
	 * State of the OA buffer.
	 */
	struct {
		struct i915_vma *vma;
		u8 *vaddr;
		u32 last_ctx_id;
		int format;
		int format_size;
		int size_exponent;

		/**
		 * Locks reads and writes to all head/tail state
		 *
		 * Consider: the head and tail pointer state needs to be read
		 * consistently from a hrtimer callback (atomic context) and
		 * read() fop (user context) with tail pointer updates happening
		 * in atomic context and head updates in user context and the
		 * (unlikely) possibility of read() errors needing to reset all
		 * head/tail state.
		 *
		 * Note: Contention/performance aren't currently a significant
		 * concern here considering the relatively low frequency of
		 * hrtimer callbacks (5ms period) and that reads typically only
		 * happen in response to a hrtimer event and likely complete
		 * before the next callback.
		 *
		 * Note: This lock is not held *while* reading and copying data
		 * to userspace so the value of head observed in hrtimer
		 * callbacks won't represent any partial consumption of data.
		 */
		spinlock_t ptr_lock;

		/**
		 * One 'aging' tail pointer and one 'aged' tail pointer ready
		 * to be used for reading.
		 *
		 * Initial values of 0xffffffff are invalid and imply that an
		 * update is required (and should be ignored by an attempted
		 * read)
		 */
		struct {
			u32 offset;
		} tails[2];

		/**
		 * Index for the aged tail ready to read() data up to.
		 */
		unsigned int aged_tail_idx;

		/**
		 * A monotonic timestamp for when the current aging tail pointer
		 * was read; used to determine when it is old enough to trust.
		 */
		u64 aging_timestamp;

		/**
		 * Although we can always read back the head pointer register,
		 * we prefer to avoid trusting the HW state, just to avoid any
		 * risk that some hardware condition could somehow bump the
		 * head pointer unpredictably and cause us to forward the wrong
		 * OA buffer data to userspace.
		 */
		u32 head;
	} oa_buffer;
};

/**
 * struct i915_oa_ops - Gen specific implementation of an OA unit stream
 */
struct i915_oa_ops {
	/**
	 * @is_valid_b_counter_reg: Validates register's address for
	 * programming boolean counters for a particular platform.
	 */
	bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
				       u32 addr);

	/**
	 * @is_valid_mux_reg: Validates register's address for programming mux
	 * for a particular platform.
	 */
	bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);

	/**
	 * @is_valid_flex_reg: Validates register's address for programming
	 * flex EU filtering for a particular platform.
	 */
	bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);

	/**
	 * @enable_metric_set: Selects and applies any MUX configuration to set
	 * up the Boolean and Custom (B/C) counters that are part of the
	 * counter reports being sampled. May apply system constraints such as
	 * disabling EU clock gating as required.
	 */
	int (*enable_metric_set)(struct i915_perf_stream *stream);

	/**
	 * @disable_metric_set: Remove system constraints associated with using
	 * the OA unit.
	 */
	void (*disable_metric_set)(struct i915_perf_stream *stream);

	/**
	 * @oa_enable: Enable periodic sampling
	 */
	void (*oa_enable)(struct i915_perf_stream *stream);

	/**
	 * @oa_disable: Disable periodic sampling
	 */
	void (*oa_disable)(struct i915_perf_stream *stream);

	/**
	 * @read: Copy data from the circular OA buffer into a given userspace
	 * buffer.
	 */
	int (*read)(struct i915_perf_stream *stream,
		    char __user *buf,
		    size_t count,
		    size_t *offset);

	/**
	 * @oa_hw_tail_read: read the OA tail pointer register
	 *
	 * In particular this enables us to share all the fiddly code for
	 * handling the OA unit tail pointer race that affects multiple
	 * generations.
	 */
	u32 (*oa_hw_tail_read)(struct i915_perf_stream *stream);
};

struct intel_cdclk_state {
	unsigned int cdclk, vco, ref, bypass;
	u8 voltage_level;
};

struct drm_i915_private {
	struct drm_device drm;

	const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
	struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
	struct intel_driver_caps caps;

	/**
	 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
	 * end of stolen which we can optionally use to create GEM objects
	 * backed by stolen memory. Note that stolen_usable_size tells us
	 * exactly how much of this we are actually allowed to use, given that
	 * some portion of it is in fact reserved for use by hardware functions.
	 */
	struct resource dsm;
	/**
	 * Reserved portion of Data Stolen Memory
	 */
	struct resource dsm_reserved;

	/*
	 * Stolen memory is segmented in hardware with different portions
	 * off-limits to certain functions.
	 *
	 * The drm_mm is initialised to the total accessible range, as found
	 * from the PCI config. On Broadwell+, this is further restricted to
	 * avoid the first page! The upper end of stolen memory is reserved for
	 * hardware functions and similarly removed from the accessible range.
	 */
	resource_size_t stolen_usable_size;	/* Total size minus reserved ranges */

	struct intel_uncore uncore;
	struct intel_uncore_mmio_debug mmio_debug;

	struct i915_virtual_gpu vgpu;

	struct intel_gvt *gvt;

	struct intel_wopcm wopcm;

	struct intel_csr csr;

	struct intel_gmbus gmbus[GMBUS_NUM_PINS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of where the gmbus and gpio blocks are located (either
	 * on PCH or on SoC for platforms without PCH).
	 */
	u32 gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	u32 mipi_mmio_base;

	u32 psr_mmio_base;

	u32 pps_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;

	/* Context used internally to idle the GPU and setup initial state */
	struct i915_gem_context *kernel_context;

	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	struct rb_root uabi_engines;

	struct resource mch_res;

	/* protects the irq masks */
	spinlock_t irq_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* Sideband mailbox protection */
	struct mutex sb_lock;
	struct pm_qos_request sb_qos;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 pm_rps_events;
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct i915_hotplug hotplug;
	struct intel_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct mutex backlight_lock;

	/* protects panel power sequencer state */
	struct mutex pps_mutex;

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_preferred_vco_freq;
	unsigned int max_cdclk_freq;

	unsigned int max_dotclk_freq;
	unsigned int rawclk_freq;
	unsigned int hpll_freq;
	unsigned int fdi_pll_freq;
	unsigned int czclk_freq;

	struct {
		/*
		 * The current logical cdclk state.
		 * See intel_atomic_state.cdclk.logical
		 *
		 * For reading holding any crtc lock is sufficient,
		 * for writing must hold all of them.
		 */
		struct intel_cdclk_state logical;
		/*
		 * The current actual cdclk state.
		 * See intel_atomic_state.cdclk.actual
		 */
		struct intel_cdclk_state actual;
		/* The current hardware cdclk state */
		struct intel_cdclk_state hw;

		int force_min_cdclk;
	} cdclk;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* ordered wq for modesets */
	struct workqueue_struct *modeset_wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	struct drm_atomic_state *modeset_restore_state;
	struct drm_modeset_acquire_ctx reset_ctx;

	struct i915_ggtt ggtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
	struct mutex mm_lock;

	/* Kernel Modesetting */

	struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	/* dpll and cdclk state is protected by connection_mutex */
	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	const struct intel_dpll_mgr *dpll_mgr;

	/*
	 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
	 * Must be global rather than per dpll, because on some platforms
	 * plls share registers.
	 */
	struct mutex dpll_lock;

	unsigned int active_crtcs;
	/* minimum acceptable cdclk for each pipe */
	int min_cdclk[I915_MAX_PIPES];
	/* minimum acceptable voltage level for each pipe */
	u8 min_voltage_level[I915_MAX_PIPES];

	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	struct i915_wa_list gt_wa_list;

	struct i915_frontbuffer_tracking fb_tracking;

	struct intel_atomic_helper {
		struct llist_head free_list;
		struct work_struct free_work;
	} atomic_helper;

	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/*
	 * edram size in MB.
	 * Cannot be determined by PCIID. You must always read a register.
	 */
	u32 edram_size_mb;

	/* gen6+ GT PM state */
	struct intel_gen6_power_mgmt gt_pm;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

	/* fbdev registered on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	/* hda/i915 audio component */
	struct i915_audio_component *audio_component;
	bool audio_component_registered;
	/**
	 * av_mutex - mutex for audio/video sync
	 *
	 */
	struct mutex av_mutex;
	int audio_power_refcount;

	struct {
		struct mutex mutex;
		struct list_head list;
		struct llist_head free_list;
		struct work_struct free_work;

		/* The hw wants to have a stable context identifier for the
		 * lifetime of the context (for OA, PASID, faults, etc).
		 * This is limited in execlists to 21 bits.
		 */
		struct ida hw_ida;
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
/* in Gen12 ID 0x7FF is reserved to indicate idle */
#define GEN12_MAX_CONTEXT_HW_ID	(GEN11_MAX_CONTEXT_HW_ID - 1)
		struct list_head hw_id_list;
	} contexts;

	u32 fdi_rx_config;

	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
	u32 chv_phy_control;
	/*
	 * Shadows for CHV DPLL_MD regs to keep the state
	 * checker somewhat working in the presence of hardware
	 * crappiness (can't read out DPLL_MD for pipes B & C).
	 */
	u32 chv_dpll_md[I915_MAX_PIPES];
	u32 bxt_phy_grc;

	u32 suspend_count;
	bool power_domains_suspended;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state *vlv_s0ix_state;

	enum {
		I915_SAGV_UNKNOWN = 0,
		I915_SAGV_DISABLED,
		I915_SAGV_ENABLED,
		I915_SAGV_NOT_CONTROLLED
	} sagv_status;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		u16 pri_latency[5];
		/* sprite */
		u16 spr_latency[5];
		/* cursor */
		u16 cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		u16 skl_latency[8];

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct skl_ddb_values skl_hw;
			struct vlv_wm_values vlv;
			struct g4x_wm_values g4x;
		};

		u8 max_level;

		/*
		 * Should be held around atomic WM register writing; also
		 * protects intel_crtc->wm.active and
		 * crtc_state->wm.need_postvbl_update.
		 */
		struct mutex wm_mutex;

		/*
		 * Set during HW readout of watermarks/DDB.  Some platforms
		 * need to know when we're still using BIOS-provided values
		 * (which we don't fully trust).
		 */
		bool distrust_bios_wm;
	} wm;

	struct dram_info {
		bool valid;
		bool is_16gb_dimm;
		u8 num_channels;
		u8 ranks;
		u32 bandwidth_kbps;
		bool symmetric_memory;
		enum intel_dram_type {
			INTEL_DRAM_UNKNOWN,
			INTEL_DRAM_DDR3,
			INTEL_DRAM_DDR4,
			INTEL_DRAM_LPDDR3,
			INTEL_DRAM_LPDDR4
		} type;
	} dram_info;

	struct intel_bw_info {
		unsigned int deratedbw[3]; /* for each QGV point */
		u8 num_qgv_points;
		u8 num_planes;
	} max_bw[6];

	struct drm_private_obj bw_obj;

	struct intel_runtime_pm runtime_pm;

	struct {
		bool initialized;

		struct kobject *metrics_kobj;
		struct ctl_table_header *sysctl_header;

		/*
		 * Lock associated with adding/modifying/removing OA configs
		 * in dev_priv->perf.metrics_idr.
		 */
		struct mutex metrics_lock;

		/*
		 * List of dynamic configurations, you need to hold
		 * dev_priv->perf.metrics_lock to access it.
		 */
		struct idr metrics_idr;

		/*
		 * Lock associated with anything below within this structure
		 * except exclusive_stream.
		 */
		struct mutex lock;
		struct list_head streams;

		/*
		 * The stream currently using the OA unit. If accessed
		 * outside a syscall associated to its file
		 * descriptor, you need to hold
		 * dev_priv->drm.struct_mutex.
		 */
		struct i915_perf_stream *exclusive_stream;

		/**
		 * For rate limiting any notifications of spurious
		 * invalid OA reports
		 */
		struct ratelimit_state spurious_report_rs;

		struct i915_oa_config test_config;

		u32 gen7_latched_oastatus1;
		u32 ctx_oactxctrl_offset;
		u32 ctx_flexeu0_offset;

		/**
		 * The RPT_ID/reason field for Gen8+ includes a bit
		 * to determine if the CTX ID in the report is valid
		 * but the specific bit differs between Gen 8 and 9
		 */
		u32 gen8_valid_ctx_bit;

		struct i915_oa_ops ops;
		const struct i915_oa_format *oa_formats;
	} perf;

	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
	struct intel_gt gt;

	struct {
		struct notifier_block pm_notifier;

		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;

		/**
		 * When we detect an idle GPU, we want to turn on
		 * powersaving features. So once we see that there
		 * are no more requests outstanding and no more
		 * arrive within a small period of time, we fire
		 * off the idle_work.
		 */
		struct work_struct idle_work;
	} gem;

	u8 pch_ssc_use;

	/* For i945gm vblank irq vs. C3 workaround */
	struct {
		struct work_struct work;
		struct pm_qos_request pm_qos;
		u8 c3_disable_latency;
		u8 enabled;
	} i945gm_vblank;

	/* perform PHY state sanity checks? */
	bool chv_phy_assert[2];

	bool ipc_enabled;

	/* Used to save the pipe-to-encoder mapping for audio */
	struct intel_encoder *av_enc_map[I915_MAX_PIPES];

	/* necessary resource sharing with HDMI LPE audio driver. */
	struct {
		struct platform_device *platdev;
		int	irq;
	} lpe_audio;

	struct i915_pmu pmu;

	struct i915_hdcp_comp_master *hdcp_master;
	bool hdcp_comp_added;

	/* Mutex to protect the above hdcp component related values. */
	struct mutex hdcp_comp_mutex;

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};

struct dram_dimm_info {
	u8 size, width, ranks;
};

struct dram_channel_info {
	struct dram_dimm_info dimm_l, dimm_s;
	u8 ranks;
	bool is_16gb_dimm;
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}

static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
	return dev_get_drvdata(kdev);
}

static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_NUM_ENGINES; \
	     (id__)++) \
		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])

/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
	for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \
	     (tmp__) ? \
	     ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
	     0;)

#define rb_to_uabi_engine(rb) \
	rb_entry_safe(rb, struct intel_engine_cs, uabi_node)

#define for_each_uabi_engine(engine__, i915__) \
	for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
	     (engine__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
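
/*
 * Usage sketch (illustrative only): walk the engines exposed to
 * userspace, e.g. to log their names. The i915 pointer is assumed to
 * be in scope.
 *
 *	struct intel_engine_cs *engine;
 *
 *	for_each_uabi_engine(engine, i915)
 *		pr_info("found engine %s\n", engine->name);
 */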

#define I915_GTT_OFFSET_NONE ((u32)-1)

/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
	BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
	BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
})
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
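
/*
 * Worked example (illustrative, assuming the usual PIPE_B == 1 and
 * PLANE_PRIMARY == 0 enum values): with 8 bits per pipe,
 * INTEL_FRONTBUFFER(PIPE_B, PLANE_PRIMARY) == BIT(8), and
 * INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) covers bits 8-15.
 */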

#define INTEL_INFO(dev_priv)	(&(dev_priv)->__info)
#define RUNTIME_INFO(dev_priv)	(&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv)	(&(dev_priv)->caps)

#define INTEL_GEN(dev_priv)	(INTEL_INFO(dev_priv)->gen)
#define INTEL_DEVID(dev_priv)	(RUNTIME_INFO(dev_priv)->device_id)

#define REVID_FOREVER		0xff
#define INTEL_REVID(dev_priv)	((dev_priv)->drm.pdev->revision)

#define INTEL_GEN_MASK(s, e) ( \
	BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
	BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
	GENMASK((e) - 1, (s) - 1))

/* Returns true if Gen is in inclusive range [Start, End] */
#define IS_GEN_RANGE(dev_priv, s, e) \
	(!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))

#define IS_GEN(dev_priv, n) \
	(BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
	 INTEL_INFO(dev_priv)->gen == (n))
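
/*
 * Worked example (illustrative): gen N is represented by bit N - 1 of
 * gen_mask, so INTEL_GEN_MASK(9, 10) expands to GENMASK(9, 8), i.e.
 * bits 8 and 9, and IS_GEN_RANGE(dev_priv, 9, 10) matches gen9 and
 * gen10 devices.
 */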
1857 
1858 /*
1859  * Return true if revision is in range [since,until] inclusive.
1860  *
1861  * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
1862  */
1863 #define IS_REVID(p, since, until) \
1864 	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))

static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
		      enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	/* Expand the platform_mask array if this fails. */
	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
		     pbits * ARRAY_SIZE(info->platform_mask));

	return p / pbits;
}

static __always_inline unsigned int
__platform_mask_bit(const struct intel_runtime_info *info,
		    enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	return p % pbits + INTEL_SUBPLATFORM_BITS;
}

static inline u32
intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
	const unsigned int pi = __platform_mask_index(info, p);

	/* SUBPLATFORM_BITS is a bit count, not a mask: extract the low bits. */
	return info->platform_mask[pi] & (BIT(INTEL_SUBPLATFORM_BITS) - 1);
}

static __always_inline bool
IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);

	BUILD_BUG_ON(!__builtin_constant_p(p));

	return info->platform_mask[pi] & BIT(pb);
}

static __always_inline bool
IS_SUBPLATFORM(const struct drm_i915_private *i915,
	       enum intel_platform p, unsigned int s)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);
	const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
	const u32 mask = info->platform_mask[pi];

	BUILD_BUG_ON(!__builtin_constant_p(p));
	BUILD_BUG_ON(!__builtin_constant_p(s));
	BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);

	/* Shift and test on the MSB position so sign flag can be used. */
	return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}
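
/*
 * Usage sketch (illustrative): both helpers reduce to single bit tests
 * against platform_mask, which is why the platform (and subplatform)
 * arguments must be compile-time constants. In practice the
 * IS_SKYLAKE()/IS_SKL_ULT()-style wrappers below are what gets used.
 *
 *	if (IS_PLATFORM(i915, INTEL_SKYLAKE))
 *		...;
 *	if (IS_SUBPLATFORM(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT))
 *		...;
 */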

#define IS_MOBILE(dev_priv)	(INTEL_INFO(dev_priv)->is_mobile)

#define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
#define IS_I85X(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I85X)
#define IS_I865G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I865G)
#define IS_I915G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915G)
#define IS_I915GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915GM)
#define IS_I945G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945G)
#define IS_I945GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945GM)
#define IS_I965G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965G)
#define IS_I965GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965GM)
#define IS_G45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G45)
#define IS_GM45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv)	(IS_G45(dev_priv) || IS_GM45(dev_priv))
#define IS_PINEVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
#define IS_G33(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G33)
#define IS_IRONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
#define IS_IRONLAKE_M(dev_priv) \
	(IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
#define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 1)
#define IS_VALLEYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_HASWELL)
#define IS_BROADWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROADWELL)
#define IS_SKYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
#define IS_BROXTON(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROXTON)
#define IS_KABYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
#define IS_GEMINILAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_CANNONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_ELKHARTLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
#define IS_TIGERLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
#define IS_BDW_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
#define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
#define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_GT1(dev_priv)	(IS_HASWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 1)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_SKL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_KBL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KBL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 4)
#define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_CFL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CFL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_CNL_WITH_PORT_F(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
#define IS_ICL_WITH_PORT_F(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)

#define SKL_REVID_A0		0x0
#define SKL_REVID_B0		0x1
#define SKL_REVID_C0		0x2
#define SKL_REVID_D0		0x3
#define SKL_REVID_E0		0x4
#define SKL_REVID_F0		0x5
#define SKL_REVID_G0		0x6
#define SKL_REVID_H0		0x7

#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
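
/*
 * Usage sketch (illustrative): gate a workaround on early Skylake
 * steppings only, or on everything from a given stepping onwards.
 *
 *	if (IS_SKL_REVID(dev_priv, SKL_REVID_A0, SKL_REVID_D0))
 *		...;	// A0..D0 inclusive
 *	if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER))
 *		...;	// E0 and every later revision
 */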

#define BXT_REVID_A0		0x0
#define BXT_REVID_A1		0x1
#define BXT_REVID_B0		0x3
#define BXT_REVID_B_LAST	0x8
#define BXT_REVID_C0		0x9

#define IS_BXT_REVID(dev_priv, since, until) \
	(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))

#define KBL_REVID_A0		0x0
#define KBL_REVID_B0		0x1
#define KBL_REVID_C0		0x2
#define KBL_REVID_D0		0x3
#define KBL_REVID_E0		0x4

#define IS_KBL_REVID(dev_priv, since, until) \
	(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))

#define GLK_REVID_A0		0x0
#define GLK_REVID_A1		0x1

#define IS_GLK_REVID(dev_priv, since, until) \
	(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))

#define CNL_REVID_A0		0x0
#define CNL_REVID_B0		0x1
#define CNL_REVID_C0		0x2

#define IS_CNL_REVID(p, since, until) \
	(IS_CANNONLAKE(p) && IS_REVID(p, since, until))

#define ICL_REVID_A0		0x0
#define ICL_REVID_A2		0x1
#define ICL_REVID_B0		0x3
#define ICL_REVID_B2		0x4
#define ICL_REVID_C0		0x5

#define IS_ICL_REVID(p, since, until) \
	(IS_ICELAKE(p) && IS_REVID(p, since, until))

#define IS_LP(dev_priv)	(INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv)	(IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv)	(IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))

#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))

#define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({		\
	unsigned int first__ = (first);					\
	unsigned int count__ = (count);					\
	(INTEL_INFO(dev_priv)->engine_mask &				\
	 GENMASK(first__ + count__ - 1, first__)) >> first__;		\
})
#define VDBOX_MASK(dev_priv) \
	ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS)
#define VEBOX_MASK(dev_priv) \
	ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
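
/*
 * For example, on a part that exposes only VCS0 and VCS2,
 * VDBOX_MASK(dev_priv) evaluates to 0b101: the engine_mask bits covering
 * the video engines are extracted and shifted down so VCS0 lands at bit 0.
 */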

/*
 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
 * All later gens can run the final buffer from the ppgtt.
 */
#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)

#define HAS_LLC(dev_priv)	(INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv)	(INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv)	((dev_priv)->edram_size_mb)
#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
#define HAS_WT(dev_priv)	((IS_HASWELL(dev_priv) || \
				 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))

#define HWS_NEEDS_PHYSICAL(dev_priv)	(INTEL_INFO(dev_priv)->hws_needs_physical)

#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_elsq)
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_preemption)

#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)

#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
#define HAS_PPGTT(dev_priv) \
	(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
	(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)

#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
	GEM_BUG_ON((sizes) == 0); \
	((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
})
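
/*
 * Usage sketch (illustrative): HAS_PAGE_SIZES() reports whether *all* of
 * the requested sizes are supported, so pass a single size, or a mask in
 * which every size must be available.
 *
 *	if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_2M))
 *		...;	// 2M GTT pages usable
 */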

#define HAS_OVERLAY(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
		(INTEL_INFO(dev_priv)->display.overlay_needs_physical)

/* Early gen2 parts have a totally busted CS TLB and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))

#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)	\
	(IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))

/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
	(IS_CANNONLAKE(dev_priv) || IS_GEN(dev_priv, 9))

#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
					IS_GEMINILAKE(dev_priv) || \
					IS_KABYLAKE(dev_priv))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
					 !(IS_I915G(dev_priv) || \
					 IS_I915GM(dev_priv)))
#define SUPPORTS_TV(dev_priv)		(INTEL_INFO(dev_priv)->display.supports_tv)
#define I915_HAS_HOTPLUG(dev_priv)	(INTEL_INFO(dev_priv)->display.has_hotplug)

#define HAS_FW_BLC(dev_priv)	(INTEL_GEN(dev_priv) > 2)
#define HAS_FBC(dev_priv)	(INTEL_INFO(dev_priv)->display.has_fbc)
#define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7)

#define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))

#define HAS_DP_MST(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dp_mst)

#define HAS_DDI(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
#define HAS_PSR(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_psr)
#define HAS_TRANSCODER_EDP(dev_priv)	 (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0)

#define HAS_RC6(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6p)
#define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */

#define HAS_RPS(dev_priv)	(INTEL_INFO(dev_priv)->has_rps)

#define HAS_CSR(dev_priv)	(INTEL_INFO(dev_priv)->display.has_csr)

#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)

#define HAS_IPC(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ipc)

#define HAS_GT_UC(dev_priv)	(INTEL_INFO(dev_priv)->has_gt_uc)

/* Having GuC is not the same as using GuC */
#define USES_GUC(dev_priv)		intel_uc_uses_guc(&(dev_priv)->gt.uc)
#define USES_GUC_SUBMISSION(dev_priv)	intel_uc_uses_guc_submission(&(dev_priv)->gt.uc)

#define HAS_POOLED_EU(dev_priv)	(INTEL_INFO(dev_priv)->has_pooled_eu)

#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv)	(INTEL_INFO(dev_priv)->has_global_mocs)

#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)

#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
				 2 : HAS_L3_DPF(dev_priv))

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3

#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)

static inline bool intel_vtd_active(void)
{
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active();
}

static inline bool
intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
	return IS_BROXTON(dev_priv) && intel_vtd_active();
}

/* i915_drv.c */
#ifdef CONFIG_COMPAT
long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#else
#define i915_compat_ioctl NULL
#endif
extern const struct dev_pm_ops i915_pm_ops;

int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
void i915_driver_remove(struct drm_i915_private *i915);

void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);

static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->gvt;
}

static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->vgpu.active;
}

int i915_getparam_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);

/* i915_gem.c */
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
void i915_gem_sanitize(struct drm_i915_private *i915);
int i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);

static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
	/*
	 * A single pass should suffice to release all the freed objects
	 * (along most call paths), but be a little more paranoid: freeing
	 * the objects does take a small amount of time, during which the
	 * rcu callbacks could have added new objects onto the freed list,
	 * and armed the work again.
	 */
	while (atomic_read(&i915->mm.free_count)) {
		flush_work(&i915->mm.free_work);
		rcu_barrier();
	}
}

static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
	/*
	 * Similar to the objects above (see i915_gem_drain_freed_objects()),
	 * in general we have workers that are armed by RCU and then rearm
	 * themselves in their callbacks. To be paranoid, we need to
	 * drain the workqueue a second time after waiting for the RCU
	 * grace period so that we catch work queued via RCU from the first
	 * pass. As neither drain_workqueue() nor flush_workqueue() report
	 * a result, we assume that no more than 3 passes are required to
	 * catch all _recursive_ RCU delayed work.
	 */
	int pass = 3;
	do {
		flush_workqueue(i915->wq);
		rcu_barrier();
		i915_gem_drain_freed_objects(i915);
	} while (--pass);
	drain_workqueue(i915->wq);
}

struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags);

int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags);
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)

struct i915_vma * __must_check
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    const struct i915_ggtt_view *view,
		    u64 size,
		    u64 alignment,
		    u64 flags);

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);

static inline int __must_check
i915_mutex_lock_interruptible(struct drm_device *dev)
{
	return mutex_lock_interruptible(&dev->struct_mutex);
}

int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      u32 handle, u64 *offset);
int i915_gem_mmap_gtt_version(void);

int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_count);
}

static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
					  struct intel_engine_cs *engine)
{
	return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}

void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_driver_register(struct drm_i915_private *i915);
void i915_gem_driver_unregister(struct drm_i915_private *i915);
void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
void i915_gem_driver_release(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
			   unsigned int flags, long timeout);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
vm_fault_t i915_gem_fault(struct vm_fault *vmf);

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);

static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
{
	return idr_find(&file_priv->context_idr, id);
}

static inline struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file_priv, id);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();

	return ctx;
}
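
/*
 * Usage sketch (illustrative): a successful lookup takes a reference on
 * the context via kref_get_unless_zero(), so it must be balanced with
 * i915_gem_context_put() once the caller is done.
 *
 *	struct i915_gem_context *ctx;
 *
 *	ctx = i915_gem_context_lookup(file_priv, id);
 *	if (!ctx)
 *		return -ENOENT;
 *	...
 *	i915_gem_context_put(ctx);
 */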

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
					  u64 min_size, u64 alignment,
					  unsigned cache_level,
					  u64 start, u64 end,
					  unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
					 struct drm_mm_node *node,
					 unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm);

/* i915_gem_internal.c */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
				phys_addr_t size);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		i915_gem_object_is_tiled(obj);
}

u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
			unsigned int tiling, unsigned int stride);
u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
			     unsigned int tiling, unsigned int stride);

const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct i915_gem_context *cxt,
			    struct intel_engine_cs *engine,
			    struct drm_i915_gem_object *batch_obj,
			    u64 user_batch_start,
			    u32 batch_start_offset,
			    u32 batch_len,
			    struct drm_i915_gem_object *shadow_batch_obj,
			    u64 shadow_batch_start);

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)INTEL_INFO(dev_priv);
}

int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

#define __I915_REG_OP(op__, dev_priv__, ...) \
	intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)

#define I915_READ(reg__)	 __I915_REG_OP(read, dev_priv, (reg__))
#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))

#define POSTING_READ(reg__)	__I915_REG_OP(posting_read, dev_priv, (reg__))

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&dev_priv->uncore.lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&dev_priv->uncore.lock);
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
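
/*
 * Sketch of the pattern described above (illustrative; the register is an
 * arbitrary example): hold the uncore lock and forcewake explicitly
 * around a burst of untraced accesses.
 *
 *	u32 val;
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(&dev_priv->uncore, FORCEWAKE_ALL);
 *	val = I915_READ_FW(GEN6_RP_STATE_CAP);
 *	intel_uncore_forcewake_put__locked(&dev_priv->uncore, FORCEWAKE_ALL);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */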

/* register wait wrappers for display regs */
#define intel_de_wait_for_register(dev_priv_, reg_, mask_, value_, timeout_) \
	intel_wait_for_register(&(dev_priv_)->uncore, \
				(reg_), (mask_), (value_), (timeout_))

#define intel_de_wait_for_set(dev_priv_, reg_, mask_, timeout_) ({	\
	u32 mask__ = (mask_);						\
	intel_de_wait_for_register((dev_priv_), (reg_),			\
				   mask__, mask__, (timeout_)); \
})

#define intel_de_wait_for_clear(dev_priv_, reg_, mask_, timeout_) \
	intel_de_wait_for_register((dev_priv_), (reg_), (mask_), 0, (timeout_))
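
/*
 * Usage sketch (illustrative): wait up to 100ms for a bit to assert and
 * then to deassert. Like intel_wait_for_register(), these return 0 on
 * success and -ETIMEDOUT on timeout.
 *
 *	if (intel_de_wait_for_set(dev_priv, reg, mask, 100))
 *		DRM_ERROR("timed out waiting for bits to set\n");
 *	if (intel_de_wait_for_clear(dev_priv, reg, mask, 100))
 *		DRM_ERROR("timed out waiting for bits to clear\n");
 */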

/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap);

static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 10)
		return CNL_HWS_CSB_WRITE_INDEX;
	else
		return I915_HWS_CSB_WRITE_INDEX;
}

static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915)
{
	return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}

#endif