/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/mm_types.h>
#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_syncobj.h>

#include "uapi/drm/vc4_drm.h"

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_DUMB,
	VC4_BO_TYPE_BIN,
	VC4_BO_TYPE_RCL,
	VC4_BO_TYPE_BCL,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT
};

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
	/* Tracks the number of users of the perfmon; when this counter reaches
	 * zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 counters[];
};
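
/* Illustrative sketch (not taken from the driver sources): how the refcount
 * above is typically managed around a job submission, using the perfmon
 * helpers declared later in this header.  It assumes vc4_perfmon_find()
 * returns a counted reference; the exact call sites in vc4_gem.c may differ.
 *
 *	struct vc4_perfmon *perfmon;
 *
 *	perfmon = vc4_perfmon_find(vc4file, id);
 *	if (!perfmon)
 *		return -ENOENT;
 *	exec->perfmon = perfmon;	// reference held for the job's lifetime
 *	...
 *	vc4_perfmon_put(exec->perfmon);	// freed once refcnt drops to zero
 */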

struct vc4_dev {
	struct drm_device *dev;

	struct vc4_hdmi *hdmi;
	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;
	struct vc4_dpi *dpi;
	struct vc4_dsi *dsi1;
	struct vc4_vec *vec;
	struct vc4_txp *txp;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache.  Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	u32 num_labels;
	struct vc4_label {
		const char *name;
		u32 num_allocated;
		u32 size_allocated;
	} *bo_labels;

	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		unsigned int num;
		size_t size;
		unsigned int purged_num;
		size_t purged_size;
		struct mutex lock;
	} purgeable;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner.  The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering.  The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* Used to track the active perfmon if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations.  This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct semaphore async_modeset;

	struct drm_modeset_lock ctm_state_lock;
	struct drm_private_obj ctm_manager;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return (struct vc4_dev *)dev->dev_private;
}
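
/* A minimal sketch (not taken from the driver sources) of how the
 * emit_seqno/finished_seqno pair in struct vc4_dev above is meant to be
 * compared: a job assigned sequence number S is complete once finished_seqno
 * has caught up to S.  The helper name below is hypothetical;
 * vc4_wait_for_seqno(), declared later in this header, waits on the same
 * condition.
 *
 *	static bool vc4_seqno_passed(struct vc4_dev *vc4, uint64_t seqno)
 *	{
 *		return vc4->finished_seqno >= seqno;
 *	}
 */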

struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	bool t_format;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* Normally (resv == &_resv), except for imported BOs. */
	struct reservation_object *resv;
	struct reservation_object _resv;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;

	/* Store purgeable/purged state here */
	u32 madv;
	struct mutex madv_lock;
};

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return (struct vc4_bo *)bo;
}
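
/* Illustrative sketch (not taken from the driver sources) of the usecnt/madv
 * interaction described in struct vc4_bo above, using the helpers declared
 * later in this header: a user of the BO pins it with vc4_bo_inc_usecnt()
 * (which fails if the BO has already been purged) and unpins it when done,
 * allowing the BO to be placed back on the purgeable list.
 *
 *	int ret = vc4_bo_inc_usecnt(bo);
 *	if (ret)
 *		return ret;		// BO purged or otherwise unusable
 *	... hand the BO to the GPU or the display engine ...
 *	vc4_bo_dec_usecnt(bo);
 */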

struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return (struct vc4_fence *)fence;
}
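
/* A minimal sketch (not taken from the driver sources) of the signaled() test
 * the seqno field above exists for: the fence can be considered signaled once
 * the device-wide finished_seqno has passed the fence's own seqno.  The helper
 * name is hypothetical; the real callback lives in vc4_fence.c.
 *
 *	static bool vc4_fence_seqno_signaled(struct dma_fence *fence)
 *	{
 *		struct vc4_fence *f = to_vc4_fence(fence);
 *		struct vc4_dev *vc4 = to_vc4_dev(f->dev);
 *
 *		return vc4->finished_seqno >= f->seqno;
 *	}
 */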

struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};

struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
};

struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	/* Memory manager for CRTCs to allocate space in the display
	 * list.  Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;
};

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return (struct vc4_plane *)plane;
}

enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};

struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offset in the dlist to various words, for pageflip or
	 * cursor updates.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;

	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB. */
	u32 src_x, src_y;

	u32 src_w[2], src_h[2];

	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
	bool is_unity;
	bool is_yuv;

	/* Offset to start scanning out from the start of the plane's
	 * BO.
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;

	/* Set when the plane has per-pixel alpha content or does not cover
	 * the entire screen. This is a hint to the CRTC that it might need
	 * to enable background color fill.
	 */
	bool needs_bg_fill;
};

static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
	return (struct vc4_plane_state *)state;
}

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

struct vc4_crtc_data {
	/* Which channel of the HVS this pixelvalve sources from. */
	int hvs_channel;

	enum vc4_encoder_type encoder_types[4];
};

struct vc4_crtc {
	struct drm_crtc base;
	const struct vc4_crtc_data *data;
	void __iomem *regs;

	/* Timestamp at start of vblank irq - unaffected by lock delays. */
	ktime_t t_vblank;

	/* Which HVS channel we're using for our CRTC. */
	int channel;

	u8 lut_r[256];
	u8 lut_g[256];
	u8 lut_b[256];
	/* Size in pixels of the COB memory allocated to this CRTC. */
	u32 cob_size;

	struct drm_pending_vblank_event *event;
};

static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
	return (struct vc4_crtc *)crtc;
}

#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
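
/* Usage sketch for the register accessors above (not taken from the driver
 * sources): they expect a local "struct vc4_dev *vc4" to be in scope.
 * V3D_IDENT0 stands in for one of the register offsets defined in vc4_regs.h.
 *
 *	struct vc4_dev *vc4 = to_vc4_dev(dev);
 *	u32 ident = V3D_READ(V3D_IDENT0);
 */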

struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL.  Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_cma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at.  It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs.  The paddr gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and size decremented as the shader recs
	 * themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data.  These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;

	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct vc4_perfmon *perfmon;
};

/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
	struct {
		struct idr idr;
		struct mutex lock;
	} perfmon;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}
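
/* Illustrative sketch (not taken from the driver sources): the job-list
 * helpers above walk lists that, per the job_lock comment in struct vc4_dev,
 * are shared with the IRQ handler, so callers outside the IRQ path would
 * normally hold job_lock while peeking at them.
 *
 *	unsigned long irqflags;
 *	struct vc4_exec_info *exec;
 *
 *	spin_lock_irqsave(&vc4->job_lock, irqflags);
 *	exec = vc4_first_bin_job(vc4);
 *	...
 *	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 */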

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};
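
/* Illustrative sketch (not taken from the driver sources) of the ~0
 * convention described above: validation code can skip texture parameters
 * that were never provided by checking for the sentinel.
 *
 *	for (i = 0; i < ARRAY_SIZE(sample->p_offset); i++) {
 *		if (sample->p_offset[i] == ~0)
 *			continue;	// not provided; HW treats it as 0
 *		... relocate and validate the provided parameter ...
 *	}
 */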

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};

/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			if (!(COND))					\
				ret__ = -ETIMEDOUT;			\
			break;						\
		}							\
		if (W && drm_can_sleep())  {				\
			msleep(W);					\
		} else {						\
			cpu_relax();					\
		}							\
	}								\
	ret__;								\
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)
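
/* Usage sketch for wait_for() (not taken from the driver sources): poll a
 * condition with a timeout in milliseconds.  The register and bit names are
 * placeholders, not real definitions.
 *
 *	int ret = wait_for(V3D_READ(SOME_STATUS_REG) & SOME_DONE_BIT, 1000);
 *	if (ret == -ETIMEDOUT)
 *		DRM_ERROR("timed out waiting for the status bit\n");
 */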

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
				 struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
vm_fault_t vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
			     bool in_vblank_irq, int *vpos, int *hpos,
			     ktime_t *stime, ktime_t *etime,
			     const struct drm_display_mode *mode);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_txp_armed(struct drm_crtc_state *state);

/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;
int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;
int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_txp.c */
extern struct platform_driver vc4_txp_driver;
int vc4_txp_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);