/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__

#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
#include "msm_gem.h"

struct msm_gem_submit;
struct msm_gpu_perfcntr;
struct msm_gpu_state;

struct msm_gpu_config {
	const char *ioname;
	unsigned int nr_rings;
};

/* So far, with hardware that I've seen to date, we can have:
 *  + zero, one, or two z180 2d cores
 *  + a3xx or a2xx 3d core, which share a common CP (the firmware
 *    for the CP seems to implement some different PM4 packet types
 *    but the basics of cmdstream submission are the same)
 *
 * Which means that the eventual complete "class" hierarchy, once
 * support for all past and present hw is in place, becomes:
 *  + msm_gpu
 *    + adreno_gpu
 *      + a3xx_gpu
 *      + a2xx_gpu
 *    + z180_gpu
 */
struct msm_gpu_funcs {
	int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
	int (*hw_init)(struct msm_gpu *gpu);
	int (*pm_suspend)(struct msm_gpu *gpu);
	int (*pm_resume)(struct msm_gpu *gpu);
	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
	irqreturn_t (*irq)(struct msm_gpu *gpu);
	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
	void (*recover)(struct msm_gpu *gpu);
	void (*destroy)(struct msm_gpu *gpu);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
	/* show GPU status in debugfs: */
	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
			struct drm_printer *p);
	/* for generation specific debugfs: */
	void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
	unsigned long (*gpu_busy)(struct msm_gpu *gpu);
	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
	int (*gpu_state_put)(struct msm_gpu_state *state);
	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
	struct msm_gem_address_space *(*create_address_space)
		(struct msm_gpu *gpu, struct platform_device *pdev);
	struct msm_gem_address_space *(*create_private_address_space)
		(struct msm_gpu *gpu);
	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
};

/* Additional state for iommu faults: */
struct msm_gpu_fault_info {
	u64 ttbr0;
	unsigned long iova;
	int flags;
	const char *type;
	const char *block;
};

/**
 * struct msm_gpu_devfreq - devfreq related state
 */
struct msm_gpu_devfreq {
	/** devfreq: devfreq instance */
	struct devfreq *devfreq;

	/**
	 * busy_cycles:
	 *
	 * Used by implementation of gpu->gpu_busy() to track the last
	 * busy counter value, for calculating elapsed busy cycles since
	 * last sampling period.
	 */
	u64 busy_cycles;

	/** time: Time of last sampling period. */
	ktime_t time;

	/** idle_time: Time of last transition to idle. */
	ktime_t idle_time;

	/**
	 * idle_freq:
	 *
	 * Shadow frequency used while the GPU is idle.  From the PoV of
	 * the devfreq governor, we are continuing to sample busyness and
	 * adjust frequency while the GPU is idle, but we use this shadow
	 * value as the GPU is actually clamped to minimum frequency while
	 * it is inactive.
	 */
	unsigned long idle_freq;
};

struct msm_gpu {
	const char *name;
	struct drm_device *dev;
	struct platform_device *pdev;
	const struct msm_gpu_funcs *funcs;

	struct adreno_smmu_priv adreno_smmu;

	/* performance counters (hw & sw): */
	spinlock_t perf_lock;
	bool perfcntr_active;
	struct {
		bool active;
		ktime_t time;
	} last_sample;
	uint32_t totaltime, activetime;    /* sw counters */
	uint32_t last_cntrs[5];            /* hw counters */
	const struct msm_gpu_perfcntr *perfcntrs;
	uint32_t num_perfcntrs;

	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
	int nr_rings;

	/*
	 * List of GEM active objects on this gpu.  Protected by
	 * msm_drm_private::mm_lock
	 */
	struct list_head active_list;

	/**
	 * active_submits:
	 *
	 * The number of submitted but not yet retired submits, used to
	 * determine transitions between active and idle.
	 *
	 * Protected by active_lock
	 */
	int active_submits;

	/** active_lock: protects active_submits and idle/active transitions */
	struct mutex active_lock;

	/* does gpu need hw_init? */
	bool needs_hw_init;

	/* number of GPU hangs (for all contexts) */
	int global_faults;

	void __iomem *mmio;
	int irq;

	struct msm_gem_address_space *aspace;

	/* Power Control: */
	struct regulator *gpu_reg, *gpu_cx;
	struct clk_bulk_data *grp_clks;
	int nr_clocks;
	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
	uint32_t fast_rate;

	/* Hang and Inactivity Detection: */
#define DRM_MSM_INACTIVE_PERIOD   66 /* in ms (roughly four frames) */

#define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
	struct timer_list hangcheck_timer;

	/* Fault info for most recent iova fault: */
	struct msm_gpu_fault_info fault_info;

	/* work for handling GPU iova faults: */
	struct kthread_work fault_work;

	/* work for handling GPU recovery: */
	struct kthread_work recover_work;

	/* work for handling active-list retiring: */
	struct kthread_work retire_work;

	/* worker for retire/recover: */
	struct kthread_worker *worker;

	struct drm_gem_object *memptrs_bo;

	struct msm_gpu_devfreq devfreq;

	uint32_t suspend_count;

	struct msm_gpu_state *crashstate;

	/* Enable clamping to idle freq when inactive: */
	bool clamp_to_idle;

	/* True if the hardware supports expanded apriv (a650 and newer) */
	bool hw_apriv;

	struct thermal_cooling_device *cooling;
};

static inline struct msm_gpu *dev_to_gpu(struct device *dev)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}

/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32

#define MSM_GPU_RB_CNTL_DEFAULT \
		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
		AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
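
/*
 * Worked out from the sizes above (illustrative arithmetic only): the /8
 * in the macro suggests the register fields take log2 values in 8-byte
 * units, so the default evaluates to BUFSZ = ilog2(32768 / 8) = 12 and
 * BLKSZ = ilog2(32 / 8) = 2, i.e. a 32KB ring split into 32-byte blocks.
 */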

static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (ring->seqno > ring->memptrs->fence)
			return true;
	}

	return false;
}

/* Perf-Counters:
 * The select_reg and select_val are just there for the benefit of the child
 * class that actually enables the perf counter..  but msm_gpu base class
 * will handle sampling/displaying the counters.
 */

struct msm_gpu_perfcntr {
	uint32_t select_reg;
	uint32_t sample_reg;
	uint32_t select_val;
	const char *name;
};
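
/*
 * Illustrative only (the register names and countable below are made up,
 * not real hardware definitions): a child class describes its counters in
 * a table like this and points gpu->perfcntrs / gpu->num_perfcntrs at it.
 * The child class programs select_val into select_reg to enable the
 * counter; the base class then samples sample_reg.
 *
 *   static const struct msm_gpu_perfcntr example_perfcntrs[] = {
 *       { REG_EXAMPLE_PERF_SELECT_0, REG_EXAMPLE_PERF_SAMPLE_0_LO,
 *         EXAMPLE_COUNTABLE_BUSY_CYCLES, "CP_BUSY" },
 *   };
 */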

/*
 * The number of priority levels provided by drm gpu scheduler.  The
 * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
 * cases, so we don't use it (no need for kernel generated jobs).
 */
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)

/**
 * struct msm_file_private - per-drm_file context
 *
 * @queuelock:    synchronizes access to submitqueues list
 * @submitqueues: list of &msm_gpu_submitqueue created by userspace
 * @queueid:      counter incremented each time a submitqueue is created,
 *                used to assign &msm_gpu_submitqueue.id
 * @aspace:       the per-process GPU address-space
 * @ref:          reference count
 * @seqno:        unique per process seqno
 */
struct msm_file_private {
	rwlock_t queuelock;
	struct list_head submitqueues;
	int queueid;
	struct msm_gem_address_space *aspace;
	struct kref ref;
	int seqno;

	/**
	 * entities:
	 *
	 * Table of per-priority-level sched entities used by submitqueues
	 * associated with this &drm_file.  Because some userspace apps
	 * make assumptions about rendering from multiple gl contexts
	 * (of the same priority) within the process happening in FIFO
	 * order without requiring any fencing beyond MakeCurrent(), we
	 * create at most one &drm_sched_entity per-process per-priority-
	 * level.
	 */
	struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
};
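
/*
 * Note on the entities table above: it holds one slot per (ring, priority)
 * pair.  In the submitqueue code the slot for a given queue is computed
 * roughly as (ring_nr * NR_SCHED_PRIORITIES) + sched_prio, with entities
 * created lazily the first time a matching submitqueue is opened.  (This
 * is a description of current usage; see msm_submitqueue.c for the
 * authoritative logic.)
 */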

/**
 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
 *
 * @gpu:        the gpu instance
 * @prio:       the userspace priority level
 * @ring_nr:    [out] the ringbuffer the userspace priority maps to
 * @sched_prio: [out] the gpu scheduler priority level which the userspace
 *              priority maps to
 *
 * With drm/scheduler providing its own level of prioritization, our total
 * number of available priority levels is (nr_rings * NR_SCHED_PRIORITIES).
 * Each ring is associated with its own scheduler instance.  However, our
 * UABI is that lower numerical values are higher priority.  So mapping the
 * single userspace priority level into ring_nr and sched_prio takes some
 * care.  The userspace provided priority (when a submitqueue is created)
 * is mapped to ring nr and scheduler priority as such:
 *
 *   ring_nr    = userspace_prio / NR_SCHED_PRIORITIES
 *   sched_prio = NR_SCHED_PRIORITIES -
 *                (userspace_prio % NR_SCHED_PRIORITIES) - 1
 *
 * This allows generations without preemption (nr_rings==1) to have some
 * amount of prioritization, and provides more priority levels for gens
 * that do have preemption.
 */
static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
		unsigned *ring_nr, enum drm_sched_priority *sched_prio)
{
	unsigned rn, sp;

	rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp);

	/* invert sched priority to map to higher-numeric-is-higher-
	 * priority convention
	 */
	sp = NR_SCHED_PRIORITIES - sp - 1;

	if (rn >= gpu->nr_rings)
		return -EINVAL;

	*ring_nr = rn;
	*sched_prio = sp;

	return 0;
}
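
/*
 * Worked example (assuming NR_SCHED_PRIORITIES == 3 and gpu->nr_rings == 4):
 * userspace prio 0 maps to ring 0 at the highest sched priority, prio 4
 * maps to ring 1 at the middle priority, and prio 11 maps to ring 3 at the
 * lowest priority.  Any prio >= 12 is rejected with -EINVAL.
 */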

/**
 * struct msm_gpu_submitqueue - Userspace created context.
 *
 * A submitqueue is associated with a gl context or vk queue (or equiv)
 * in userspace.
 *
 * @id:        userspace id for the submitqueue, unique within the drm_file
 * @flags:     userspace flags for the submitqueue, specified at creation
 *             (currently unused)
 * @ring_nr:   the ringbuffer used by this submitqueue, which is determined
 *             by the submitqueue's priority
 * @faults:    the number of GPU hangs associated with this submitqueue
 * @ctx:       the per-drm_file context associated with the submitqueue (ie.
 *             which set of pgtables do jobs submitted to this submitqueue
 *             use)
 * @node:      node in the context's list of submitqueues
 * @fence_idr: maps fence-id to dma_fence for userspace visible fence
 *             seqno, protected by submitqueue lock
 * @lock:      submitqueue lock
 * @ref:       reference count
 * @entity:    the submit job-queue
 */
struct msm_gpu_submitqueue {
	int id;
	u32 flags;
	u32 ring_nr;
	int faults;
	struct msm_file_private *ctx;
	struct list_head node;
	struct idr fence_idr;
	struct mutex lock;
	struct kref ref;
	struct drm_sched_entity *entity;
};

struct msm_gpu_state_bo {
	u64 iova;
	size_t size;
	void *data;
	bool encoded;
};

struct msm_gpu_state {
	struct kref ref;
	struct timespec64 time;

	struct {
		u64 iova;
		u32 fence;
		u32 seqno;
		u32 rptr;
		u32 wptr;
		void *data;
		int data_size;
		bool encoded;
	} ring[MSM_GPU_MAX_RINGS];

	int nr_registers;
	u32 *registers;

	u32 rbbm_status;

	char *comm;
	char *cmd;

	struct msm_gpu_fault_info fault_info;

	int nr_bos;
	struct msm_gpu_state_bo *bos;
};

static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
	msm_writel(data, gpu->mmio + (reg << 2));
}

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
	return msm_readl(gpu->mmio + (reg << 2));
}

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
	msm_rmw(gpu->mmio + (reg << 2), mask, or);
}

static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
{
	u64 val;

	/*
	 * Why not a readq here? Two reasons: 1) many of the LO registers are
	 * not quad word aligned and 2) the GPU hardware designers have a bit
	 * of a history of putting registers where they fit, especially in
	 * spins. The longer a GPU family goes the higher the chance that
	 * we'll get burned.  We could do a series of validity checks if we
	 * wanted to, but really is a readq() that much better? Nah.
	 */

	/*
	 * For some lo/hi registers (like perfcounters), the hi value is latched
	 * when the lo is read, so make sure to read the lo first to trigger
	 * that
	 */
	val = (u64) msm_readl(gpu->mmio + (lo << 2));
	val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);

	return val;
}

static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
{
	/* Why not a writeq here? Read the screed above */
	msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
	msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);

int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id);
int msm_submitqueue_create(struct drm_device *drm,
		struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id);
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);

void msm_submitqueue_destroy(struct kref *kref);

void __msm_file_private_destroy(struct kref *kref);

static inline void msm_file_private_put(struct msm_file_private *ctx)
{
	kref_put(&ctx->ref, __msm_file_private_destroy);
}

static inline struct msm_file_private *msm_file_private_get(
	struct msm_file_private *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
void msm_devfreq_suspend(struct msm_gpu *gpu);
void msm_devfreq_active(struct msm_gpu *gpu);
void msm_devfreq_idle(struct msm_gpu *gpu);

int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);

void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config);

struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);

void msm_gpu_cleanup(struct msm_gpu *gpu);

struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);

static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
{
	if (queue)
		kref_put(&queue->ref, msm_submitqueue_destroy);
}

static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = NULL;

	mutex_lock(&gpu->dev->struct_mutex);

	if (gpu->crashstate) {
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;
	}

	mutex_unlock(&gpu->dev->struct_mutex);

	return state;
}

static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->dev->struct_mutex);

	if (gpu->crashstate) {
		if (gpu->funcs->gpu_state_put(gpu->crashstate))
			gpu->crashstate = NULL;
	}

	mutex_unlock(&gpu->dev->struct_mutex);
}

/*
 * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
 * support expanded privileges
 */
#define check_apriv(gpu, flags) \
	(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
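
/*
 * Usage sketch (illustrative, not a prescribed call site): kernel-owned
 * buffers that should be privileged on apriv-capable targets can wrap
 * their allocation flags, e.g.:
 *
 *   msm_gem_kernel_new(drm, size, check_apriv(gpu, MSM_BO_WC),
 *                      gpu->aspace, &bo, &iova);
 */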

#endif /* __MSM_GPU_H__ */