Lines Matching full:gpu in drivers/gpu/drm/msm/msm_gpu.h

45	int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
46	int (*hw_init)(struct msm_gpu *gpu);
47	int (*pm_suspend)(struct msm_gpu *gpu);
48	int (*pm_resume)(struct msm_gpu *gpu);
49	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
50	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
52	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
53	void (*recover)(struct msm_gpu *gpu);
54	void (*destroy)(struct msm_gpu *gpu);
56	/* show GPU status in debugfs: */
57	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
58			struct drm_printer *p);
60	void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
62	unsigned long (*gpu_busy)(struct msm_gpu *gpu);
63	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
65	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
66	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
67	struct msm_gem_address_space *(*create_address_space)
68		(struct msm_gpu *gpu, struct platform_device *pdev);
69	struct msm_gem_address_space *(*create_private_address_space)
70		(struct msm_gpu *gpu);
71	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
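This table is the per-generation backend interface: each GPU family fills one in and hands it to msm_gpu_init(), declared further down in this file. A minimal sketch of how a backend might populate it; every example_* name is a hypothetical stand-in, not a function from the driver:

static const struct msm_gpu_funcs example_gpu_funcs = {
	.get_param = example_get_param,
	.hw_init = example_hw_init,
	.pm_suspend = example_pm_suspend,
	.pm_resume = example_pm_resume,
	.submit = example_submit,
	.flush = example_flush,
	.active_ring = example_active_ring,
	.recover = example_recover,
	.destroy = example_destroy,
	.gpu_busy = example_gpu_busy,
	.gpu_state_get = example_gpu_state_get,
	.create_address_space = example_create_address_space,
	.get_rptr = example_get_rptr,
};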
93	 * Used by implementation of gpu->gpu_busy() to track the last
94	 * busy counter value, for calculating elapsed busy cycles since
95	 * last sampling period.
108	 * Shadow frequency used while the GPU is idle. From the PoV of
109	 * the devfreq governor, we are continuing to sample busyness and
110	 * adjust frequency while the GPU is idle, but we use this shadow
111	 * value as the GPU is actually clamped to minimum frequency while
112	 * idle.
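The busy-cycle tracking documented above feeds devfreq load sampling: a gpu_busy() implementation reads a free-running busy-cycle counter and returns the delta since the previous sample. A sketch under that assumption; the REG_EXAMPLE_* register names are hypothetical:

static unsigned long example_gpu_busy(struct msm_gpu *gpu)
{
	u64 busy_cycles;

	/* free-running counter; gpu_read64() is defined later in this file */
	busy_cycles = gpu_read64(gpu, REG_EXAMPLE_BUSY_LO, REG_EXAMPLE_BUSY_HI);

	/* elapsed busy cycles since the last sampling period */
	busy_cycles -= gpu->devfreq.busy_cycles;
	gpu->devfreq.busy_cycles += busy_cycles;

	return busy_cycles;
}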
141	 * List of GEM active objects on this gpu. Protected by
142	 * &msm_drm_private.mm_lock
159 /* does gpu need hw_init? */
162 /* number of GPU hangs (for all contexts) */
187	/* work for handling GPU iova faults: */
190 /* work for handling GPU recovery: */
230	static inline bool msm_gpu_active(struct msm_gpu *gpu)
234	for (i = 0; i < gpu->nr_rings; i++) {
235		struct msm_ringbuffer *ring = gpu->rb[i];
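Only the lines containing the bare token gpu match above. Reconstructed as a sketch around them, the helper reports the GPU busy if any ring has a submitted fence the hardware has not yet retired; ring->seqno is the last submitted fence, ring->memptrs->fence the last one the GPU wrote back, and fence_after() is the driver's wrap-safe comparison:

static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		/* submitted ahead of what the GPU has completed? */
		if (fence_after(ring->seqno, ring->memptrs->fence))
			return true;
	}

	return false;
}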
258	 * The number of priority levels provided by drm gpu scheduler. The
259	 * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
260	 * cases, so we don't use it (no need for kernel generated jobs).
271 * @aspace: the per-process GPU address-space
300 * @gpu: the gpu instance
303	 * @sched_prio: [out] the gpu scheduler priority level which the userspace
304	 *              priority maps to
322	static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
334	if (rn >= gpu->nr_rings)
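The listing shows only the signature and the range check. Userspace sees nr_rings * NR_SCHED_PRIORITIES flat priority levels; the conversion splits that flat value into a ring number and a per-ring drm_sched priority, inverting the latter because drm_sched treats higher numeric values as higher priority. A sketch reconstructed around the matched lines:

static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
		unsigned *ring_nr, enum drm_sched_priority *sched_prio)
{
	unsigned rn, sp;

	/* split the flat priority into (ring, sched-priority) */
	rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp);

	/* invert to drm_sched's higher-numeric-is-higher convention */
	sp = NR_SCHED_PRIORITIES - sp - 1;

	if (rn >= gpu->nr_rings)
		return -EINVAL;

	*ring_nr = rn;
	*sched_prio = sp;

	return 0;
}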
354 * @faults: the number of GPU hangs associated with this submitqueue
414	static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
416	msm_writel(data, gpu->mmio + (reg << 2));
419	static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
421	return msm_readl(gpu->mmio + (reg << 2));
424	static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
426	msm_rmw(gpu->mmio + (reg << 2), mask, or);
429	static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
434	 * Why not a readq here? Two reasons: 1) many of the LO registers are
435	 * not quad word aligned and 2) the GPU hardware designers have a bit
436	 * of a history of putting registers where they fit, especially in
437	 * spins. The longer a GPU family goes the higher the chance that
438	 * we'll get burned.
447	val = (u64) msm_readl(gpu->mmio + (lo << 2));
448	val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
453	static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
456	msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
457	msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
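Note that reg here is a dword index rather than a byte offset, hence the reg << 2 in every accessor. A usage sketch; the REG_EXAMPLE_* offsets are hypothetical, and the LO-before-HI ordering follows the read order shown in gpu_read64() above (some counters latch HI when LO is read):

#define REG_EXAMPLE_CNTL	0x0042	/* hypothetical dword index */
#define REG_EXAMPLE_CNT_LO	0x0100
#define REG_EXAMPLE_CNT_HI	0x0101

static void example_accessor_usage(struct msm_gpu *gpu)
{
	u64 cycles;

	/* set bit 0 while preserving all other bits (mask = 0, or = bit) */
	gpu_rmw(gpu, REG_EXAMPLE_CNTL, 0, BIT(0));

	/* 64-bit counter read: LO first, then HI */
	cycles = gpu_read64(gpu, REG_EXAMPLE_CNT_LO, REG_EXAMPLE_CNT_HI);
}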
460 int msm_gpu_pm_suspend(struct msm_gpu *gpu);
461 int msm_gpu_pm_resume(struct msm_gpu *gpu);
490 void msm_devfreq_init(struct msm_gpu *gpu);
491 void msm_devfreq_cleanup(struct msm_gpu *gpu);
492 void msm_devfreq_resume(struct msm_gpu *gpu);
493 void msm_devfreq_suspend(struct msm_gpu *gpu);
494 void msm_devfreq_active(struct msm_gpu *gpu);
495 void msm_devfreq_idle(struct msm_gpu *gpu);
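These hooks bracket the GPU's power states for the devfreq governor. A sketch of where they plug in; the example_* call sites are simplified stand-ins for the real submit/retire/PM paths, which this listing does not show:

static int example_pm_resume(struct msm_gpu *gpu)
{
	/* ... clocks and power restored first ... */
	msm_devfreq_resume(gpu);	/* resume load sampling */
	return 0;
}

static void example_submit_path(struct msm_gpu *gpu)
{
	msm_devfreq_active(gpu);	/* leave the idle frequency clamp */
	/* ... queue commands on the ring ... */
}

static void example_retire_path(struct msm_gpu *gpu)
{
	if (!msm_gpu_active(gpu))
		msm_devfreq_idle(gpu);	/* clamp to minimum frequency */
}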
497 int msm_gpu_hw_init(struct msm_gpu *gpu);
499 void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
500 void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
501	int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
502		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
504 void msm_gpu_retire(struct msm_gpu *gpu);
505 void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
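A sampling sketch pairing the three perfcntr calls, assuming the completed signature above; a positive return is taken here as the number of counter values captured, which is an assumption, not something this listing confirms:

static void example_perfcntr_sample(struct msm_gpu *gpu)
{
	uint32_t activetime, totaltime, cntrs[5];
	int n;

	msm_gpu_perfcntr_start(gpu);
	/* ... let some work run ... */
	n = msm_gpu_perfcntr_sample(gpu, &activetime, &totaltime,
			ARRAY_SIZE(cntrs), cntrs);
	if (n > 0) {
		/* GPU was active for activetime out of totaltime ticks */
	}
	msm_gpu_perfcntr_stop(gpu);
}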
507	int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
508		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
509		const char *name, struct msm_gpu_config *config);
511	struct msm_gem_address_space *
512	msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);
514 void msm_gpu_cleanup(struct msm_gpu *gpu);
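msm_gpu_init() is the glue between a generation backend and this core. A probe-path sketch using the hypothetical example_gpu_funcs table from earlier; the config values are illustrative (the adreno backends use the "kgsl_3d0_reg_memory" ioname, but nr_rings depends on the generation):

static int example_gpu_probe(struct drm_device *drm,
		struct platform_device *pdev, struct msm_gpu *gpu)
{
	struct msm_gpu_config config = {
		.ioname = "kgsl_3d0_reg_memory",
		.nr_rings = 4,			/* illustrative */
	};

	return msm_gpu_init(drm, pdev, gpu, &example_gpu_funcs,
			"example-gpu", &config);
}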
526	static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
530	mutex_lock(&gpu->dev->struct_mutex);
532	if (gpu->crashstate) {
533		kref_get(&gpu->crashstate->ref);
534		state = gpu->crashstate;
537	mutex_unlock(&gpu->dev->struct_mutex);
542	static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
544	mutex_lock(&gpu->dev->struct_mutex);
546	if (gpu->crashstate) {
547		if (gpu->funcs->gpu_state_put(gpu->crashstate))
548			gpu->crashstate = NULL;
551	mutex_unlock(&gpu->dev->struct_mutex);
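The pairing relies on kref semantics: gpu_state_put() returns nonzero only when the final reference is dropped, which is why the cached pointer is cleared just then, and struct_mutex serializes both helpers against the crash handler installing a new state. A sketch of one plausible consumer; the dump path itself is not part of this listing:

static void example_dump_crashstate(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = msm_gpu_crashstate_get(gpu);

	if (!state)
		return;		/* no crash captured */

	/* ... format 'state' into a coredump buffer ... */

	msm_gpu_crashstate_put(gpu);	/* drop our reference */
}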
558	#define check_apriv(gpu, flags) \
559		(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
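On hardware with apriv support, check_apriv() folds MSM_BO_MAP_PRIV into the allocation flags so kernel-owned buffers (ringbuffers, memptrs) are mapped privileged and kept out of reach of userspace command streams. A usage sketch, assuming the driver's msm_gem_new() BO allocator:

/* privileged when hw_apriv is set, plain write-combined otherwise */
struct drm_gem_object *bo =
	msm_gem_new(gpu->dev, size, check_apriv(gpu, MSM_BO_WC));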