Lines matching full:gpu (from drivers/gpu/drm/msm/msm_gpu.h)

struct msm_gpu_funcs {
        int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
                        uint32_t param, uint64_t *value, uint32_t *len);
        int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
                        uint32_t param, uint64_t value, uint32_t len);
        int (*hw_init)(struct msm_gpu *gpu);
        int (*pm_suspend)(struct msm_gpu *gpu);
        int (*pm_resume)(struct msm_gpu *gpu);
        void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
        void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
        struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
        void (*recover)(struct msm_gpu *gpu);
        void (*destroy)(struct msm_gpu *gpu);

        /* show GPU status in debugfs: */
        void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
                        struct drm_printer *p);
        /* for generation specific debugfs: */
        void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);

        u64 (*gpu_busy)(struct msm_gpu *gpu, unsigned long *out_sample_rate);
        struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
        unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
        void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
                        bool suspended);
        struct msm_gem_address_space *(*create_address_space)
                        (struct msm_gpu *gpu, struct platform_device *pdev);
        struct msm_gem_address_space *(*create_private_address_space)
                        (struct msm_gpu *gpu);
        uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
        /* ... */
};
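These callbacks form the per-generation vtable: the core msm code only ever drives the hardware through them. A minimal sketch of how a backend might populate it; every foo_* name is hypothetical and shown only for illustration (the real backends live under drivers/gpu/drm/msm/adreno/):

        #include "msm_gpu.h"

        /* Hypothetical backend: foo_* are stand-ins, not real driver code. */
        static const struct msm_gpu_funcs foo_funcs = {
                .get_param = foo_get_param,
                .hw_init = foo_hw_init,
                .pm_suspend = foo_pm_suspend,
                .pm_resume = foo_pm_resume,
                .submit = foo_submit,
                .flush = foo_flush,
                .active_ring = foo_active_ring,
                .recover = foo_recover,
                .destroy = foo_destroy,
                .gpu_busy = foo_gpu_busy,
                .gpu_state_get = foo_gpu_state_get,
                .get_rptr = foo_get_rptr,
        };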
/* in struct msm_gpu_devfreq: */

        /* A PM QoS constraint to limit max freq while the GPU is idle. */

/* in struct msm_gpu: */

        /* General lock for serializing all the gpu things. */
        struct mutex lock;

        /* does gpu need hw_init? */
        bool needs_hw_init;

        /*
         * global_faults: number of GPU hangs not attributed to a particular
         * address space
         */
        int global_faults;

        /* work for handling GPU iova faults: */
        struct kthread_work fault_work;

        /* work for handling GPU recovery: */
        struct kthread_work recover_work;

        /* To poll for cx gdsc collapse during gpu recovery */
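Both work items run on the GPU's kthread worker so that fault handling and recovery happen outside IRQ context. A minimal sketch of kicking recovery, assuming the worker and recover_work fields above; foo_report_hang is a hypothetical caller:

        /* Sketch: schedule GPU recovery from hangcheck/IRQ context. */
        static void foo_report_hang(struct msm_gpu *gpu)
        {
                kthread_queue_work(gpu->worker, &gpu->recover_work);
        }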
static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
        int i;

        for (i = 0; i < gpu->nr_rings; i++) {
                struct msm_ringbuffer *ring = gpu->rb[i];

                /* a ring is active while its last submitted fence has not retired */
                if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
                        return true;
        }

        return false;
}
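A hedged example of the kind of gate this check provides; the caller below is hypothetical, not the driver's actual suspend path:

        /* Sketch: refuse to power down while any ring still has unretired work. */
        static int foo_runtime_suspend(struct msm_gpu *gpu)
        {
                if (msm_gpu_active(gpu))
                        return -EBUSY;
                return msm_gpu_pm_suspend(gpu);  /* declared later in this header */
        }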
/*
 * The number of priority levels provided by drm gpu scheduler. The
 * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
 * places, so it is avoided for userspace submits.
 */
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)

/* in struct msm_file_private: */

        /* @aspace: the per-process GPU address-space */

        /*
         * The total (cumulative) elapsed time GPU was busy with rendering
         * from this context, in ns.
         */

        /*
         * The total (cumulative) GPU cycles elapsed attributed to this
         * context.
         */
/*
 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
 *
 * @gpu:        the gpu instance
 * @prio:       the userspace priority level
 * @ring_nr:    [out] the ringbuffer the userspace priority maps to
 * @sched_prio: [out] the gpu scheduler priority level which the userspace
 *              selected priority maps to
 */
static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
                unsigned *ring_nr, enum drm_sched_priority *sched_prio)
{
        unsigned rn, sp;

        rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp);

        /* invert sched priority to map to higher-is-higher-priority convention */
        sp = NR_SCHED_PRIORITIES - sp - 1;

        if (rn >= gpu->nr_rings)
                return -EINVAL;

        *ring_nr = rn;
        *sched_prio = sp;

        return 0;
}
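A worked sketch of the mapping, assuming NR_SCHED_PRIORITIES == 3 and a 4-ring GPU; foo_pick_ring is hypothetical:

        /*
         * Userspace priority 5 lands on ring 1 at the lowest sched priority:
         *   ring_nr    = 5 / 3 = 1
         *   sched_prio = 5 % 3 = 2, inverted to 3 - 2 - 1 = 0
         * (userspace counts "lower is more important", drm_sched the reverse)
         */
        static int foo_pick_ring(struct msm_gpu *gpu)
        {
                unsigned ring_nr;
                enum drm_sched_priority sched_prio;

                return msm_gpu_convert_priority(gpu, 5, &ring_nr, &sched_prio);
        }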
/* @faults: the number of GPU hangs associated with this submitqueue */
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
        msm_writel(data, gpu->mmio + (reg << 2));
}

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
        return msm_readl(gpu->mmio + (reg << 2));
}

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
        msm_rmw(gpu->mmio + (reg << 2), mask, or);
}
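The reg << 2 converts a 32-bit register index into a byte offset. A usage sketch for gpu_rmw(), which clears the mask bits and then ORs in the new value; the register and bit names here are invented for illustration:

        /* Hypothetical register/bit names, purely illustrative. */
        #define REG_FOO_CTRL    0x0042
        #define FOO_CTRL_ENABLE BIT(0)
        #define FOO_CTRL_RESET  BIT(1)

        static void foo_enable(struct msm_gpu *gpu)
        {
                /* clear RESET, set ENABLE, preserving all other bits */
                gpu_rmw(gpu, REG_FOO_CTRL, FOO_CTRL_RESET | FOO_CTRL_ENABLE,
                        FOO_CTRL_ENABLE);
        }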
static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
{
        u64 val;

        /*
         * Why not a readq here? Two reasons: 1) many of the LO registers are
         * not quad word aligned and 2) the GPU hardware designers have a bit
         * of a history of putting registers where they fit, especially in
         * spins. The longer a GPU family goes the higher the chance that
         * we will get burned. We could do a series of validity checks if we
         * wanted to, but really is a readq() that much better? Nah.
         */
        val = (u64) msm_readl(gpu->mmio + (lo << 2));
        val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);

        return val;
}

static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
{
        /* see gpu_read64() comments above */
        msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
        msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
}
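Because gpu_read64() is two independent 32-bit reads, a live counter can carry from the LO word into the HI word between them. Where that matters, a caller could layer the usual hi/lo/hi retry on top; this helper is a sketch of that pattern, not something msm_gpu.h provides:

        /* Sketch: tear-resistant 64-bit counter read built on gpu_read(). */
        static u64 foo_read64_stable(struct msm_gpu *gpu, u32 lo, u32 hi)
        {
                u32 hi1, hi2, lo32;

                do {
                        hi1  = gpu_read(gpu, hi);
                        lo32 = gpu_read(gpu, lo);
                        hi2  = gpu_read(gpu, hi);
                } while (hi1 != hi2);  /* retry if LO carried into HI mid-read */

                return ((u64)hi2 << 32) | lo32;
        }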
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);

void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
                struct drm_printer *p);

int msm_file_private_set_sysprof(struct msm_file_private *ctx,
                struct msm_gpu *gpu, int sysprof);
void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
void msm_devfreq_suspend(struct msm_gpu *gpu);
void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor);
void msm_devfreq_active(struct msm_gpu *gpu);
void msm_devfreq_idle(struct msm_gpu *gpu);
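A sketch of how these entry points nest over a device's lifetime; the wrapper function is hypothetical, and the comments reflect a reading of the API rather than guaranteed semantics:

        /* Sketch: lifecycle ordering of the devfreq hooks. */
        static void foo_devfreq_lifecycle(struct msm_gpu *gpu)
        {
                msm_devfreq_init(gpu);      /* once, at GPU init */

                msm_devfreq_resume(gpu);    /* GPU powered on */
                msm_devfreq_active(gpu);    /* work submitted: lift the idle clamp */
                msm_devfreq_boost(gpu, 2);  /* momentary boost request (factor) */
                msm_devfreq_idle(gpu);      /* last job retired: clamp toward min freq */
                msm_devfreq_suspend(gpu);   /* GPU about to power off */

                msm_devfreq_cleanup(gpu);   /* once, at teardown */
        }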
int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
                uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
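A hedged sampling sketch. The assumption that msm_gpu_perfcntr_sample() returns the number of counters read (negative errno on failure) is mine; treat the whole function as illustrative only:

        /* Sketch: sample GPU busyness over a window using the perfcntr API. */
        static void foo_sample_window(struct msm_gpu *gpu)
        {
                uint32_t activetime, totaltime, cntrs[5];
                int ret;

                msm_gpu_perfcntr_start(gpu);
                /* ... let submitted work run for the sampling window ... */
                ret = msm_gpu_perfcntr_sample(gpu, &activetime, &totaltime,
                                ARRAY_SIZE(cntrs), cntrs);
                if (ret >= 0)
                        pr_info("GPU active %u of %u\n", activetime, totaltime);
                msm_gpu_perfcntr_stop(gpu);
        }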
void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
                const char *name, struct msm_gpu_config *config);

struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);

void msm_gpu_cleanup(struct msm_gpu *gpu);
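Closing the loop with the foo_funcs table sketched near the top: a backend registers itself roughly like this. The msm_gpu_config fields shown (ioname, nr_rings) are assumptions about this kernel version, and foo_gpu/foo_funcs remain hypothetical:

        struct foo_gpu {
                struct msm_gpu base;
        };

        /* Sketch: hand the backend's vtable to the core at probe time. */
        static int foo_gpu_register(struct drm_device *drm,
                        struct platform_device *pdev, struct foo_gpu *foo)
        {
                struct msm_gpu_config config = {
                        .ioname = "kgsl_3d0_reg_memory",  /* assumed MMIO resource name */
                        .nr_rings = 4,
                };

                return msm_gpu_init(drm, pdev, &foo->base, &foo_funcs, "foo", &config);
        }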
static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
        struct msm_gpu_state *state = NULL;

        mutex_lock(&gpu->lock);

        if (gpu->crashstate) {
                kref_get(&gpu->crashstate->ref);
                state = gpu->crashstate;
        }

        mutex_unlock(&gpu->lock);

        return state;
}

static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
        mutex_lock(&gpu->lock);

        if (gpu->crashstate) {
                if (gpu->funcs->gpu_state_put(gpu->crashstate))
                        gpu->crashstate = NULL;
        }

        mutex_unlock(&gpu->lock);
}
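A usage sketch: take a reference under the helper, inspect the snapshot, then drop that reference through the backend's gpu_state_put() callback (that callback is part of msm_gpu_funcs, though it did not match this search); foo_inspect_crash is hypothetical:

        /* Sketch: safely consuming a captured crash state. */
        static void foo_inspect_crash(struct msm_gpu *gpu)
        {
                struct msm_gpu_state *state = msm_gpu_crashstate_get(gpu);

                if (!state)
                        return;

                /* ... walk the snapshot under our own reference ... */

                gpu->funcs->gpu_state_put(state);  /* drop our reference */
        }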
#define check_apriv(gpu, flags) \
        (((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
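Usage sketch, in the style of the driver's own ring/buffer allocations: callers pass their flags through check_apriv(), and the privileged-mapping bit materializes only on hardware with hw_apriv set; foo_alloc_private_bo is hypothetical:

        /* Sketch: allocate a kernel BO whose mapping is privileged on
         * hw_apriv parts; elsewhere check_apriv() passes flags through. */
        static int foo_alloc_private_bo(struct msm_gpu *gpu)
        {
                struct drm_gem_object *bo;
                uint64_t iova;
                void *vaddr;

                vaddr = msm_gem_kernel_new(gpu->dev, SZ_4K,
                                check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
                                gpu->aspace, &bo, &iova);

                return PTR_ERR_OR_ZERO(vaddr);
        }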