/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__

#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/hrtimer.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "i915_reg.h"

struct drm_i915_private;
struct intel_runtime_pm;
struct intel_uncore;

struct intel_uncore_mmio_debug {
	spinlock_t lock; /* lock is also taken in irq contexts. */
	int unclaimed_mmio_check;
	int saved_mmio_check;
	u32 suspend_count;
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,
	FW_DOMAIN_ID_MEDIA_VDBOX0,
	FW_DOMAIN_ID_MEDIA_VDBOX1,
	FW_DOMAIN_ID_MEDIA_VDBOX2,
	FW_DOMAIN_ID_MEDIA_VDBOX3,
	FW_DOMAIN_ID_MEDIA_VEBOX0,
	FW_DOMAIN_ID_MEDIA_VEBOX1,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER	= BIT(FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER	= BIT(FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_MEDIA		= BIT(FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_MEDIA_VDBOX0	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX0),
	FORCEWAKE_MEDIA_VDBOX1	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX1),
	FORCEWAKE_MEDIA_VDBOX2	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX2),
	FORCEWAKE_MEDIA_VDBOX3	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX3),
	FORCEWAKE_MEDIA_VEBOX0	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX0),
	FORCEWAKE_MEDIA_VEBOX1	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX1),

	FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1
};
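
/*
 * Domain ids map 1:1 onto mask bits, so individual domains combine as a
 * plain bitmask. An illustrative (not authoritative) sketch:
 *
 * enum forcewake_domains fw = FORCEWAKE_RENDER | FORCEWAKE_MEDIA;
 *
 * intel_uncore_forcewake_get(uncore, fw);
 * ...
 * intel_uncore_forcewake_put(uncore, fw);
 */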
73 
74 struct intel_uncore_funcs {
75 	void (*force_wake_get)(struct intel_uncore *uncore,
76 			       enum forcewake_domains domains);
77 	void (*force_wake_put)(struct intel_uncore *uncore,
78 			       enum forcewake_domains domains);
79 
80 	enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore,
81 						  i915_reg_t r);
82 	enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore,
83 						   i915_reg_t r);
84 
85 	u8 (*mmio_readb)(struct intel_uncore *uncore,
86 			 i915_reg_t r, bool trace);
87 	u16 (*mmio_readw)(struct intel_uncore *uncore,
88 			  i915_reg_t r, bool trace);
89 	u32 (*mmio_readl)(struct intel_uncore *uncore,
90 			  i915_reg_t r, bool trace);
91 	u64 (*mmio_readq)(struct intel_uncore *uncore,
92 			  i915_reg_t r, bool trace);
93 
94 	void (*mmio_writeb)(struct intel_uncore *uncore,
95 			    i915_reg_t r, u8 val, bool trace);
96 	void (*mmio_writew)(struct intel_uncore *uncore,
97 			    i915_reg_t r, u16 val, bool trace);
98 	void (*mmio_writel)(struct intel_uncore *uncore,
99 			    i915_reg_t r, u32 val, bool trace);
100 };

struct intel_forcewake_range {
	u32 start;
	u32 end;

	enum forcewake_domains domains;
};

struct intel_uncore {
	void __iomem *regs;

	struct drm_i915_private *i915;
	struct intel_runtime_pm *rpm;

	spinlock_t lock; /* lock is also taken in irq contexts. */

	unsigned int flags;
#define UNCORE_HAS_FORCEWAKE		BIT(0)
#define UNCORE_HAS_FPGA_DBG_UNCLAIMED	BIT(1)
#define UNCORE_HAS_DBG_UNCLAIMED	BIT(2)
#define UNCORE_HAS_FIFO			BIT(3)

	const struct intel_forcewake_range *fw_domains_table;
	unsigned int fw_domains_table_entries;

	struct notifier_block pmic_bus_access_nb;
	struct intel_uncore_funcs funcs;

	unsigned int fifo_count;

	enum forcewake_domains fw_domains;
	enum forcewake_domains fw_domains_active;
	enum forcewake_domains fw_domains_timer;
	enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */

	struct intel_uncore_forcewake_domain {
		struct intel_uncore *uncore;
		enum forcewake_domain_id id;
		enum forcewake_domains mask;
		unsigned int wake_count;
		bool active;
		struct hrtimer timer;
		u32 __iomem *reg_set;
		u32 __iomem *reg_ack;
	} *fw_domain[FW_DOMAIN_ID_COUNT];

	unsigned int user_forcewake_count;

	struct intel_uncore_mmio_debug *debug;
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
	for (tmp__ = (mask__); tmp__ ;) \
		for_each_if(domain__ = (uncore__)->fw_domain[__mask_next_bit(tmp__)])

#define for_each_fw_domain(domain__, uncore__, tmp__) \
	for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)
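
/*
 * A minimal usage sketch for the iterators above; "domain" and "tmp" are
 * caller-supplied locals and the loop body is illustrative only:
 *
 * struct intel_uncore_forcewake_domain *domain;
 * unsigned int tmp;
 *
 * for_each_fw_domain(domain, uncore, tmp)
 *	domain->wake_count = 0;
 */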

static inline bool
intel_uncore_has_forcewake(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_FORCEWAKE;
}

static inline bool
intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED;
}

static inline bool
intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED;
}

static inline bool
intel_uncore_has_fifo(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_FIFO;
}

void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct drm_i915_private *i915);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore);
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
void intel_uncore_fini_mmio(struct intel_uncore *uncore);
void intel_uncore_suspend(struct intel_uncore *uncore);
void intel_uncore_resume_early(struct intel_uncore *uncore);
void intel_uncore_runtime_resume(struct intel_uncore *uncore);

void assert_forcewakes_inactive(struct intel_uncore *uncore);
void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);

enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op);
#define FW_REG_READ  (1)
#define FW_REG_WRITE (2)

void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains domains);
/* Like above but the caller must manage the uncore.lock itself.
 * Must be used with I915_READ_FW and friends.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains domains);

void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);

int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value);
static inline int
intel_wait_for_register(struct intel_uncore *uncore,
			i915_reg_t reg,
			u32 mask,
			u32 value,
			unsigned int timeout_ms)
{
	return __intel_wait_for_register(uncore, reg, mask, value, 2,
					 timeout_ms, NULL);
}
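
/*
 * Illustrative only; SOME_REG and SOME_READY_BIT are hypothetical names.
 * This polls until (read(SOME_REG) & SOME_READY_BIT) == SOME_READY_BIT,
 * giving up after roughly 100ms:
 *
 * if (intel_wait_for_register(uncore, SOME_REG,
 *			       SOME_READY_BIT, SOME_READY_BIT, 100))
 *	... handle the timeout ...
 */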

int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value);
static inline int
intel_wait_for_register_fw(struct intel_uncore *uncore,
			   i915_reg_t reg,
			   u32 mask,
			   u32 value,
			   unsigned int timeout_ms)
{
	return __intel_wait_for_register_fw(uncore, reg, mask, value,
					    2, timeout_ms, NULL);
}

/* register access functions */
#define __raw_read(x__, s__) \
static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
					    i915_reg_t reg) \
{ \
	return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x__, s__) \
static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
					   i915_reg_t reg, u##x__ val) \
{ \
	write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
}
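
/*
 * Instantiate __raw_uncore_read{8,16,32,64}() and
 * __raw_uncore_write{8,16,32,64}(), thin wrappers around
 * read{b,w,l,q}()/write{b,w,l,q}() on uncore->regs.
 */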
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write

#define __uncore_read(name__, x__, s__, trace__) \
static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \
					   i915_reg_t reg) \
{ \
	return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
}

#define __uncore_write(name__, x__, s__, trace__) \
static inline void intel_uncore_##name__(struct intel_uncore *uncore, \
					 i915_reg_t reg, u##x__ val) \
{ \
	uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \
}

__uncore_read(read8, 8, b, true)
__uncore_read(read16, 16, w, true)
__uncore_read(read, 32, l, true)
__uncore_read(read16_notrace, 16, w, false)
__uncore_read(read_notrace, 32, l, false)

__uncore_write(write8, 8, b, true)
__uncore_write(write16, 16, w, true)
__uncore_write(write, 32, l, true)
__uncore_write(write_notrace, 32, l, false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support I915_WRITE64, or
 * uncore->funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
__uncore_read(read64, 64, q, true)

static inline u64
intel_uncore_read64_2x32(struct intel_uncore *uncore,
			 i915_reg_t lower_reg, i915_reg_t upper_reg)
{
	u32 upper, lower, old_upper, loop = 0;

	upper = intel_uncore_read(uncore, upper_reg);
	do {
		old_upper = upper;
		lower = intel_uncore_read(uncore, lower_reg);
		upper = intel_uncore_read(uncore, upper_reg);
	} while (upper != old_upper && loop++ < 2);

	return (u64)upper << 32 | lower;
}
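
/*
 * Illustrative use for a 64-bit counter split across two 32-bit registers
 * (TIMESTAMP_LO/TIMESTAMP_HI are hypothetical names):
 *
 * u64 ts = intel_uncore_read64_2x32(uncore, TIMESTAMP_LO, TIMESTAMP_HI);
 *
 * The retry loop above guards against the lower half wrapping (and
 * carrying into the upper half) between the two reads.
 */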

#define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__))
#define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__))

#undef __uncore_read
#undef __uncore_write

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&uncore->lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&uncore->lock);
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__)
#define intel_uncore_write_fw(...) __raw_uncore_write32(__VA_ARGS__)
#define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__)
#define intel_uncore_posting_read_fw(...) ((void)intel_uncore_read_fw(__VA_ARGS__))
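
/*
 * A fuller sketch of the critical-section pattern described above; SOME_REG
 * is a hypothetical register name:
 *
 * enum forcewake_domains fw =
 *	intel_uncore_forcewake_for_reg(uncore, SOME_REG,
 *				       FW_REG_READ | FW_REG_WRITE);
 *
 * spin_lock_irq(&uncore->lock);
 * intel_uncore_forcewake_get__locked(uncore, fw);
 *
 * intel_uncore_write_fw(uncore, SOME_REG,
 *			 intel_uncore_read_fw(uncore, SOME_REG) | 1);
 *
 * intel_uncore_forcewake_put__locked(uncore, fw);
 * spin_unlock_irq(&uncore->lock);
 */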

static inline void intel_uncore_rmw(struct intel_uncore *uncore,
				    i915_reg_t reg, u32 clear, u32 set)
{
	u32 val;

	val = intel_uncore_read(uncore, reg);
	val &= ~clear;
	val |= set;
	intel_uncore_write(uncore, reg, val);
}

static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
				       i915_reg_t reg, u32 clear, u32 set)
{
	u32 val;

	val = intel_uncore_read_fw(uncore, reg);
	val &= ~clear;
	val |= set;
	intel_uncore_write_fw(uncore, reg, val);
}
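
/*
 * Note that neither helper makes the read-modify-write atomic as a whole:
 * concurrent writers to the same register must be excluded by the caller,
 * e.g. by holding uncore->lock around the _fw variant.
 */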

static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
						i915_reg_t reg, u32 val,
						u32 mask, u32 expected_val)
{
	u32 reg_val;

	intel_uncore_write(uncore, reg, val);
	reg_val = intel_uncore_read(uncore, reg);

	return (reg_val & mask) != expected_val ? -EINVAL : 0;
}
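
/*
 * Illustrative use (MY_MODE_REG and MY_MODE_MASK are hypothetical): write
 * val, read it back, and treat a masked mismatch as failure:
 *
 * if (intel_uncore_write_and_verify(uncore, MY_MODE_REG, val,
 *				     MY_MODE_MASK, val & MY_MODE_MASK))
 *	... the register did not stick ...
 */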

#define raw_reg_read(base, reg) \
	readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
	writel(value, base + i915_mmio_reg_offset(reg))

#endif /* !__INTEL_UNCORE_H__ */