/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014-2017, 2019 The Linux Foundation. All rights reserved.
 */

#ifndef __ADRENO_GPU_H__
#define __ADRENO_GPU_H__

#include <linux/firmware.h>
#include <linux/iopoll.h>

#include "msm_gpu.h"

#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"

extern bool snapshot_debugbus;

enum {
	ADRENO_FW_PM4 = 0,
	ADRENO_FW_SQE = 0, /* a6xx */
	ADRENO_FW_PFP = 1,
	ADRENO_FW_GMU = 1, /* a6xx */
	ADRENO_FW_GPMU = 2,
	ADRENO_FW_MAX,
};

/* quirks are OR'd together and tested as a bitmask, so each needs its own bit: */
enum adreno_quirks {
	ADRENO_QUIRK_TWO_PASS_USE_WFI = (1 << 0),
	ADRENO_QUIRK_FAULT_DETECT_MASK = (1 << 1),
	ADRENO_QUIRK_LMLOADKILL_DISABLE = (1 << 2),
};

struct adreno_rev {
	uint8_t core;
	uint8_t major;
	uint8_t minor;
	uint8_t patchid;
};

#define ADRENO_REV(core, major, minor, patchid) \
	((struct adreno_rev){ core, major, minor, patchid })
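
/* e.g. ADRENO_REV(5, 3, 0, 1) for an Adreno 530, patch level 1 (illustrative values) */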

struct adreno_gpu_funcs {
	struct msm_gpu_funcs base;
	int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};

struct adreno_reglist {
	u32 offset;
	u32 value;
};

extern const struct adreno_reglist a630_hwcg[], a640_hwcg[], a650_hwcg[];

struct adreno_info {
	struct adreno_rev rev;
	uint32_t revn;
	const char *name;
	const char *fw[ADRENO_FW_MAX];
	uint32_t gmem;
	enum adreno_quirks quirks;
	struct msm_gpu *(*init)(struct drm_device *dev);
	const char *zapfw;
	u32 inactive_period;
	const struct adreno_reglist *hwcg;
};

const struct adreno_info *adreno_info(struct adreno_rev rev);

struct adreno_gpu {
	struct msm_gpu base;
	struct adreno_rev rev;
	const struct adreno_info *info;
	uint32_t gmem;  /* actual gmem size */
	uint32_t revn;  /* numeric revision name */
	const struct adreno_gpu_funcs *funcs;

	/* interesting register offsets to dump: */
	const unsigned int *registers;

	/*
	 * Are we loading fw from legacy path?  Prior to addition
	 * of gpu firmware to linux-firmware, the fw files were
	 * placed in toplevel firmware directory, following qcom's
	 * android kernel.  But linux-firmware preferred they be
	 * placed in a 'qcom' subdirectory.
	 *
	 * For backwards compatibility, we try first to load from
	 * the new path, using request_firmware_direct() to avoid
	 * any potential timeout waiting for usermode helper, then
	 * fall back to the old path (with direct load).  And
	 * finally fall back to request_firmware() with the new
	 * path to allow the usermode helper.
	 */
	enum {
		FW_LOCATION_UNKNOWN = 0,
		FW_LOCATION_NEW,     /* /lib/firmware/qcom/$fwfile */
		FW_LOCATION_LEGACY,  /* /lib/firmware/$fwfile */
		FW_LOCATION_HELPER,
	} fwloc;

	/* firmware: */
	const struct firmware *fw[ADRENO_FW_MAX];

	/*
	 * Register offsets are different between some GPUs.
	 * GPU specific offsets will be exported by GPU specific
	 * code (a3xx_gpu.c) and stored in this common location.
	 */
	const unsigned int *reg_offsets;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
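
/*
 * A minimal sketch of the fallback order described in the fwloc comment
 * above, using only the stock request_firmware()/request_firmware_direct()
 * APIs.  Illustrative only -- the driver's real logic lives in
 * adreno_request_fw(), declared further down, which also records fwloc:
 */
static inline const struct firmware *
adreno_fw_fallback_sketch(struct adreno_gpu *adreno_gpu, const char *fwname)
{
	struct device *dev = adreno_gpu->base.dev->dev;
	const struct firmware *fw;
	char newname[64];

	snprintf(newname, sizeof(newname), "qcom/%s", fwname);

	/* 1) new 'qcom/' path, skipping the usermode helper: */
	if (!request_firmware_direct(&fw, newname, dev))
		return fw;
	/* 2) legacy toplevel path, still a direct load: */
	if (!request_firmware_direct(&fw, fwname, dev))
		return fw;
	/* 3) new path again, this time allowing the usermode helper: */
	if (!request_firmware(&fw, newname, dev))
		return fw;

	return NULL; /* not found in any location */
}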

struct adreno_ocmem {
	struct ocmem *ocmem;
	unsigned long base;
	void *hdl;
};

/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
	struct adreno_rev rev;
};

#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)

#define spin_until(X) ({ \
	int __ret = -ETIMEDOUT; \
	unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
	do { \
		if (X) { \
			__ret = 0; \
			break; \
		} \
	} while (time_before(jiffies, __t)); \
	__ret; \
})
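
/*
 * Example usage of spin_until() (hypothetical register and bit, for
 * illustration only):
 *
 *	if (spin_until(gpu_read(gpu, REG_SOME_STATUS) & SOME_IDLE_BIT))
 *		DRM_ERROR("timeout waiting for GPU to idle\n");
 */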

static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
{
	return (gpu->revn < 300);
}

static inline bool adreno_is_a20x(struct adreno_gpu *gpu)
{
	return (gpu->revn < 210);
}

static inline bool adreno_is_a225(struct adreno_gpu *gpu)
{
	return gpu->revn == 225;
}

static inline bool adreno_is_a305(struct adreno_gpu *gpu)
{
	return gpu->revn == 305;
}

static inline bool adreno_is_a306(struct adreno_gpu *gpu)
{
	/* yes, 307, because a305c is 306 */
	return gpu->revn == 307;
}

static inline bool adreno_is_a320(struct adreno_gpu *gpu)
{
	return gpu->revn == 320;
}

static inline bool adreno_is_a330(struct adreno_gpu *gpu)
{
	return gpu->revn == 330;
}

static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
{
	return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
}

static inline bool adreno_is_a405(struct adreno_gpu *gpu)
{
	return gpu->revn == 405;
}

static inline bool adreno_is_a420(struct adreno_gpu *gpu)
{
	return gpu->revn == 420;
}

static inline bool adreno_is_a430(struct adreno_gpu *gpu)
{
	return gpu->revn == 430;
}

static inline bool adreno_is_a510(struct adreno_gpu *gpu)
{
	return gpu->revn == 510;
}

static inline bool adreno_is_a530(struct adreno_gpu *gpu)
{
	return gpu->revn == 530;
}

static inline bool adreno_is_a540(struct adreno_gpu *gpu)
{
	return gpu->revn == 540;
}

static inline bool adreno_is_a618(struct adreno_gpu *gpu)
{
	return gpu->revn == 618;
}

static inline bool adreno_is_a630(struct adreno_gpu *gpu)
{
	return gpu->revn == 630;
}

static inline bool adreno_is_a640(struct adreno_gpu *gpu)
{
	return gpu->revn == 640;
}

static inline bool adreno_is_a650(struct adreno_gpu *gpu)
{
	return gpu->revn == 650;
}

int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
		const char *fwname);
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova);
int adreno_hw_init(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);

int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
			  struct adreno_ocmem *ocmem);
void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem);

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
		int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
int adreno_load_fw(struct adreno_gpu *adreno_gpu);

void adreno_gpu_state_destroy(struct msm_gpu_state *state);

int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);

/*
 * Common helper function to initialize the default address space for arm-smmu
 * attached targets
 */
struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
		struct platform_device *pdev);

/*
 * For a5xx and a6xx targets load the zap shader that is used to pull the GPU
 * out of secure mode
 */
int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid);

/* ringbuffer helpers (the parts that are adreno specific) */

static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt+1);
	OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}

/* no-op packet: */
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
	adreno_wait_ring(ring, 1);
	OUT_RING(ring, CP_TYPE2_PKT);
}

static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt+1);
	OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}

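/*
 * Odd-parity bit for a PM4 header field: XOR-fold the word down to a 4-bit
 * index, then look it up in the packed table 0x9669 (the bitwise complement
 * of the classic 0x6996 even-parity table), so the result is 1 exactly when
 * 'val' has an even number of set bits.
 */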
static inline u32 PM4_PARITY(u32 val)
{
	return (0x9669 >> (0xF & (val ^
		(val >> 4) ^ (val >> 8) ^ (val >> 12) ^
		(val >> 16) ^ (val >> 20) ^ (val >> 24) ^
		(val >> 28)))) & 1;
}

/* Maximum number of values that can be executed for one opcode */
#define TYPE4_MAX_PAYLOAD 127

#define PKT4(_reg, _cnt) \
	(CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
	 (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))

static inline void
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, PKT4(regindx, cnt));
}

static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
		((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
}
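
/*
 * Example of emitting a one-dword type-7 packet (illustrative; assumes the
 * CP_EVENT_WRITE opcode and an event value from adreno_pm4.xml.h):
 *
 *	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
 *	OUT_RING(ring, CACHE_FLUSH_TS);
 */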

struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);

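/*
 * ring->cur and ring->start are u32 pointers, so their difference is already
 * a dword count; MSM_GPU_RINGBUFFER_SZ is in bytes, hence the >> 2 to wrap
 * at the ring size in dwords.
 */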
static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
}

/*
 * Given a register and a count, return a value to program into
 * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
 * registers starting at _reg.
 *
 * The register base needs to be a multiple of the length. If it is not, the
 * hardware will quietly mask off the bits for you and shift the size. For
 * example, if you intend the protection to start at 0x07 for a length of 4
 * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
 * expose registers you intended to protect!
 */
#define ADRENO_PROTECT_RW(_reg, _len) \
	((1 << 30) | (1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
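
/*
 * For example (illustrative values), ADRENO_PROTECT_RW(0x100, 32) packs
 * bits 30|29 (block reads and writes), ilog2(32) = 5 into the size field
 * (bits 28..24), and the word-aligned base (0x100 << 2) into bits 19..0.
 */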

/*
 * Same as above, but allow reads over the range. For areas of mixed use (such
 * as performance counters) this allows us to protect a much larger range with a
 * single register
 */
#define ADRENO_PROTECT_RDONLY(_reg, _len) \
	((1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))

#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
		interval, timeout)
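
/*
 * Example usage of gpu_poll_timeout() (hypothetical register and bit, for
 * illustration only; 'addr' is a dword register index, hence the << 2 above):
 *
 *	u32 status;
 *	ret = gpu_poll_timeout(gpu, REG_SOME_STATUS, status,
 *			       !(status & SOME_BUSY_BIT), 100, 10000);
 */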

#endif /* __ADRENO_GPU_H__ */