// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/spinlock_types.h>
#include <linux/workqueue.h>

#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>

#include "uapi/drm/v3d_drm.h"

struct clk;
struct platform_device;
struct reset_control;

#define GMP_GRANULARITY (128 * 1024)

/* Enum for each of the V3D queues. */
enum v3d_queue {
	V3D_BIN,
	V3D_RENDER,
	V3D_TFU,
	V3D_CSD,
	V3D_CACHE_CLEAN,
};

#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1)

struct v3d_queue_state {
	struct drm_gpu_scheduler sched;

	u64 fence_context;
	u64 emit_seqno;
};

struct v3d_dev {
	struct drm_device drm;

	/* Short representation (e.g. 33, 41) of the V3D tech version
	 * and revision.
	 */
	int ver;
	bool single_irq_line;

	void __iomem *hub_regs;
	void __iomem *core_regs[3];
	void __iomem *bridge_regs;
	void __iomem *gca_regs;
	struct clk *clk;
	struct reset_control *reset;

	/* Virtual and DMA addresses of the single shared page table. */
	volatile u32 *pt;
	dma_addr_t pt_paddr;

	/* Virtual and DMA addresses of the MMU's scratch page. When
	 * a read or write is invalid in the MMU, it will be
	 * redirected here.
	 */
	void *mmu_scratch;
	dma_addr_t mmu_scratch_paddr;
	/* Virtual address bits from V3D to the MMU. */
	int va_width;

	/* Number of V3D cores. */
	u32 cores;

	/* Allocator managing the address space. All units are in
	 * number of pages.
	 */
	struct drm_mm mm;
	spinlock_t mm_lock;

	struct work_struct overflow_mem_work;

	struct v3d_bin_job *bin_job;
	struct v3d_render_job *render_job;
	struct v3d_tfu_job *tfu_job;
	struct v3d_csd_job *csd_job;

	struct v3d_queue_state queue[V3D_MAX_QUEUES];

	/* Spinlock used to synchronize the overflow memory
	 * management against bin job submission.
	 */
	spinlock_t job_lock;

	/* Protects bo_stats */
	struct mutex bo_lock;

	/* Lock taken when resetting the GPU, to keep multiple
	 * processes from trying to park the scheduler threads and
	 * reset at once.
	 */
	struct mutex reset_lock;

	/* Lock taken when creating and pushing the GPU scheduler
	 * jobs, to keep the sched-fence seqnos in order.
	 */
	struct mutex sched_lock;

	/* Lock taken during a cache clean and when initiating an L2
	 * flush, to keep L2 flushes from interfering with the
	 * synchronous L2 cleans.
	 */
	struct mutex cache_clean_lock;

	struct {
		u32 num_allocated;
		u32 pages_allocated;
	} bo_stats;
};

static inline struct v3d_dev *
to_v3d_dev(struct drm_device *dev)
{
	return container_of(dev, struct v3d_dev, drm);
}

static inline bool
v3d_has_csd(struct v3d_dev *v3d)
{
	return v3d->ver >= 41;
}

#define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)

/* The per-fd struct, which tracks the MMU mappings. */
struct v3d_file_priv {
	struct v3d_dev *v3d;

	struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
};

struct v3d_bo {
	struct drm_gem_shmem_object base;

	struct drm_mm_node node;

	/* List entry for the BO's position in
	 * v3d_render_job->unref_list
	 */
	struct list_head unref_head;
};

static inline struct v3d_bo *
to_v3d_bo(struct drm_gem_object *bo)
{
	return (struct v3d_bo *)bo;
}

struct v3d_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* v3d seqno for signaled() test */
	u64 seqno;
	enum v3d_queue queue;
};

static inline struct v3d_fence *
to_v3d_fence(struct dma_fence *fence)
{
	return (struct v3d_fence *)fence;
}

#define V3D_READ(offset) readl(v3d->hub_regs + offset)
#define V3D_WRITE(offset, val) writel(val, v3d->hub_regs + offset)

#define V3D_BRIDGE_READ(offset) readl(v3d->bridge_regs + offset)
#define V3D_BRIDGE_WRITE(offset, val) writel(val, v3d->bridge_regs + offset)

#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)

#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)
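
/*
 * Example usage (illustrative sketch only): V3D_EXAMPLE_REG and
 * V3D_EXAMPLE_BIT are stand-ins for real offsets/bits from v3d_regs.h, and a
 * local "v3d" pointer must be in scope, since the macros capture it
 * implicitly:
 *
 *	u32 status = V3D_CORE_READ(0, V3D_EXAMPLE_REG);
 *	V3D_CORE_WRITE(0, V3D_EXAMPLE_REG, status | V3D_EXAMPLE_BIT);
 */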

struct v3d_job {
	struct drm_sched_job base;

	struct kref refcount;

	struct v3d_dev *v3d;

	/* This is the array of BOs that were looked up at the start
	 * of submission.
	 */
	struct drm_gem_object **bo;
	u32 bo_count;

	/* Array of struct dma_fence * to block on before submitting this job.
	 */
	struct xarray deps;
	unsigned long last_dep;

	/* v3d fence to be signaled by IRQ handler when the job is complete. */
	struct dma_fence *irq_fence;

	/* scheduler fence for when the job is considered complete and
	 * the BO reservations can be released.
	 */
	struct dma_fence *done_fence;

	/* Callback for the freeing of the job on refcount going to 0. */
	void (*free)(struct kref *ref);
};
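
/*
 * Illustrative sketch (hypothetical helper, not the driver's actual code):
 * a dependency fence can be stored in job->deps so the scheduler waits on it
 * before running the job.
 *
 *	static int example_job_add_dep(struct v3d_job *job,
 *				       struct dma_fence *fence)
 *	{
 *		u32 id;
 *
 *		return xa_alloc(&job->deps, &id, fence, xa_limit_32b,
 *				GFP_KERNEL);
 *	}
 */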

struct v3d_bin_job {
	struct v3d_job base;

	/* GPU virtual addresses of the start/end of the CL job. */
	u32 start, end;

	u32 timedout_ctca, timedout_ctra;

	/* Corresponding render job, for attaching our overflow memory. */
	struct v3d_render_job *render;

	/* Submitted tile memory allocation start/size, tile state. */
	u32 qma, qms, qts;
};

struct v3d_render_job {
	struct v3d_job base;

	/* GPU virtual addresses of the start/end of the CL job. */
	u32 start, end;

	u32 timedout_ctca, timedout_ctra;

	/* List of overflow BOs used in the job that need to be
	 * released once the job is complete.
	 */
	struct list_head unref_list;
};

struct v3d_tfu_job {
	struct v3d_job base;

	struct drm_v3d_submit_tfu args;
};

struct v3d_csd_job {
	struct v3d_job base;

	u32 timedout_batches;

	struct drm_v3d_submit_csd args;
};

/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since the
 * timeout could be due to preemption or similar and we've never had a chance to
 * check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
	int ret__; \
	might_sleep(); \
	for (;;) { \
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP; \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret__ = 0; \
			break; \
		} \
		if (expired__) { \
			ret__ = -ETIMEDOUT; \
			break; \
		} \
		usleep_range(wait__, wait__ * 2); \
		if (wait__ < (Wmax)) \
			wait__ <<= 1; \
	} \
	ret__; \
})

#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
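
/*
 * Example usage (illustrative sketch; the register and bit names below are
 * hypothetical placeholders): poll a status register until a "done" bit is
 * set, giving up after 100 ms:
 *
 *	int ret = wait_for(V3D_READ(SOME_STATUS_REG) & SOME_DONE_BIT, 100);
 *	if (ret == -ETIMEDOUT)
 *		DRM_ERROR("timed out waiting for completion\n");
 */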

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
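
/*
 * Example (illustrative; "args" is a hypothetical ioctl argument struct):
 * clamp a userspace-supplied nanosecond timeout before handing it to a
 * jiffies-based wait such as dma_fence_wait_timeout():
 *
 *	unsigned long timeout = nsecs_to_jiffies_timeout(args->timeout_ns);
 */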

/* v3d_bo.c */
struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
			     size_t size);
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);

/* v3d_debugfs.c */
void v3d_debugfs_init(struct drm_minor *minor);

/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);

/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void v3d_job_put(struct v3d_job *job);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_clean_caches(struct v3d_dev *v3d);

/* v3d_irq.c */
int v3d_irq_init(struct v3d_dev *v3d);
void v3d_irq_enable(struct v3d_dev *v3d);
void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);

/* v3d_mmu.c */
int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo,
		       u32 *offset);
int v3d_mmu_set_page_table(struct v3d_dev *v3d);
void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);

/* v3d_sched.c */
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);