/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_GT_TYPES__
#define __INTEL_GT_TYPES__

#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "uc/intel_uc.h"

#include "i915_vma.h"
#include "intel_engine_types.h"
#include "intel_gt_buffer_pool_types.h"
#include "intel_llc_types.h"
#include "intel_rc6_types.h"
#include "intel_reset_types.h"
#include "intel_rps_types.h"
#include "intel_wakeref.h"

struct drm_i915_private;
struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;

struct intel_gt {
	struct drm_i915_private *i915;
	struct intel_uncore *uncore;
	struct i915_ggtt *ggtt;

	struct intel_uc uc;

	struct intel_gt_timelines {
		spinlock_t lock; /* protects active_list */
		struct list_head active_list;

		/* Pack multiple timelines' seqnos into the same page */
		spinlock_t hwsp_lock;
		struct list_head hwsp_free_list;
	} timelines;

	struct intel_gt_requests {
		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;
	} requests;

	struct intel_wakeref wakeref;
	atomic_t user_wakeref; /* wakerefs held on behalf of userspace */

	struct list_head closed_vma;
	spinlock_t closed_lock; /* guards the list of closed_vma */

	ktime_t last_init_time; /* timestamp of the last GT initialisation */
	struct intel_reset reset;

	/**
	 * Is the GPU currently considered idle, or busy executing
	 * userspace requests? Whilst idle, we allow runtime power
	 * management to power down the hardware and display clocks.
	 * In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	intel_wakeref_t awake;

	u32 clock_frequency; /* CS timestamp clock frequency */

	struct intel_llc llc;
	struct intel_rc6 rc6;
	struct intel_rps rps;

	spinlock_t irq_lock; /* protects the cached interrupt masks below */
	u32 gt_imr;
	u32 pm_ier;
	u32 pm_imr;

	u32 pm_guc_events; /* PM interrupt events reserved for the GuC */

	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
					    [MAX_ENGINE_INSTANCE + 1];

	/*
	 * Default address space (either GGTT or ppGTT depending on arch).
	 *
	 * Reserved for exclusive use by the kernel.
	 */
	struct i915_address_space *vm;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 *
	 * Buffers older than 1s are periodically reaped from the pool,
	 * or may be reclaimed by the shrinker before then.
	 */
	struct intel_gt_buffer_pool buffer_pool;

	struct i915_vma *scratch; /* scratch page, see intel_gt_scratch_field */

	struct intel_gt_info {
		intel_engine_mask_t engine_mask;
		u8 num_engines;

		/* Media engine access to SFC per instance */
		u8 vdbox_sfc_access;

		/* Slice/subslice/EU info */
		struct sseu_dev_info sseu;
	} info;
};
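
/*
 * Illustrative sketch, not part of the original header: the engine_class[]
 * array above is indexed by [class][instance], so an engine lookup reduces
 * to a pair of bounds checks followed by a 2D array dereference. The helper
 * name below is hypothetical; in the driver proper such accessors live in
 * intel_gt.h rather than in this types header.
 */
static inline struct intel_engine_cs *
intel_gt_lookup_engine_sketch(const struct intel_gt *gt, u8 class, u8 instance)
{
	if (class > MAX_ENGINE_CLASS || instance > MAX_ENGINE_INSTANCE)
		return NULL;

	return gt->engine_class[class][instance];
}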

enum intel_gt_scratch_field {
	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,

	/* 6 * 8 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,

	/* 4 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
};
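
/*
 * Illustrative sketch, not part of the original header: the enum values
 * above are byte offsets into the single scratch page pointed at by
 * gt->scratch, so a field's GGTT address is the vma's base plus the field
 * offset. A helper along these lines, intel_gt_scratch_offset(), is
 * provided by intel_gt.h; the variant here is only a demonstration.
 */
static inline u32
intel_gt_scratch_offset_sketch(const struct intel_gt *gt,
			       enum intel_gt_scratch_field field)
{
	return i915_ggtt_offset(gt->scratch) + field;
}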

#endif /* __INTEL_GT_TYPES__ */