/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "qxl_drv.h"
#include "qxl_object.h"
#include <trace/events/dma_fence.h>

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releaseables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct dma_fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
	return "release";
}

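/*
 * The device only processes releases when it is actively prodded, so
 * rather than sleep on the fence we notify it of memory pressure, kick
 * the garbage collector, and poll the fence state in a backoff loop
 * until it signals or the timeout expires.
 */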
static long qxl_fence_wait(struct dma_fence *fence, bool intr,
			   signed long timeout)
{
	struct qxl_device *qdev;
	struct qxl_release *release;
	int count = 0, sc = 0;
	bool have_drawable_releases;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);
	release = container_of(fence, struct qxl_release, base);
	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
	sc++;

	if (dma_fence_is_signaled(fence))
		goto signaled;

	qxl_io_notify_oom(qdev);

	for (count = 0; count < 11; count++) {
		if (!qxl_queue_garbage_collect(qdev, true))
			break;

		if (dma_fence_is_signaled(fence))
			goto signaled;
	}

	if (dma_fence_is_signaled(fence))
		goto signaled;

	if (have_drawable_releases || sc < 4) {
		if (sc > 2)
			/* back off */
			usleep_range(500, 1000);

		if (time_after(jiffies, end))
			return 0;

		if (have_drawable_releases && sc > 300) {
			DMA_FENCE_WARN(fence, "failed to wait on release %llu "
					      "after spincount %d\n",
				       fence->context & ~0xf0000000, sc);
			goto signaled;
		}
		goto retry;
	}
	/*
	 * yeah, original sync_obj_wait gave up after 3 spins when
	 * have_drawable_releases is not set.
	 */

signaled:
	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

static const struct dma_fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.wait = qxl_fence_wait,
};

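/*
 * Allocate a release and register it in the release IDR so the device
 * can hand the id back to us later; the next fence seqno is assigned
 * under the same lock.
 */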
static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	DRM_DEBUG_DRIVER("allocated release %d\n", handle);
	release->id = handle;
	return handle;
}

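/* Drop the references this release holds on its buffer objects. */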
static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
	release->release_bo = NULL;
}

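/*
 * Free a release: return its surface id if it has one, drop the IDR
 * handle, and release the bo list.  If the fence was initialized, the
 * final dma_fence_put() frees the release (struct dma_fence is embedded
 * at the start of struct qxl_release); otherwise kfree it directly.
 */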
void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		dma_fence_signal(&release->base);
		dma_fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo)
{
	/* pin release bos: they are too messy to evict */
	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
			     QXL_GEM_DOMAIN_VRAM, NULL, bo);
}

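/*
 * Track a buffer object on this release's reservation list, taking a
 * reference.  Adding the same bo twice is a no-op.
 */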
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	entry->tv.shared = false;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}

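/*
 * Make a reserved bo usable by the device: validate its placement if it
 * is not pinned, reserve a shared-fence slot, and make sure it has a
 * surface id where one is required.
 */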
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { true, false };
	int ret;

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type, false);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (ret)
			return ret;
	}

	ret = reservation_object_reserve_shared(bo->tbo.resv);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
	if (ret)
		return ret;
	return 0;
}

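/*
 * Reserve and validate every bo on the release, backing all
 * reservations off again if any single validation fails.
 */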
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if the only object on the release is the release itself,
	   no reservation is needed since these objects are pinned */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
				     !no_intr, NULL);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}

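/* Undo qxl_release_reserve_list() without submitting anything. */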
void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if the only object on the release is the release itself,
	   nothing was reserved since these objects are pinned */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

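/*
 * Allocate a release for a surface command.  A destroy command reuses
 * the bo of the matching create command: its release info is stashed
 * 64 bytes past the create release in the same slot, so no new space
 * needs to be carved out.
 */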
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = create_rel->release_bo;

		(*release)->release_bo = bo;
		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}

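/*
 * Allocate a release and suballocate command space for it out of the
 * current release bo for its type, moving on to a fresh bo once the
 * current one is fully carved up.
 */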
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;

	if (type == QXL_RELEASE_DRAWABLE)
		cur_idx = 0;
	else if (type == QXL_RELEASE_SURFACE_CMD)
		cur_idx = 1;
	else if (type == QXL_RELEASE_CURSOR_CMD)
		cur_idx = 2;
	else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_bo = bo;
	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}

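/* Look a release up by the id the device hands back to us. */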
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

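/*
 * Map the page of the release bo containing this release's info and
 * return a pointer to the info itself.  The mapping is atomic, so the
 * caller must pair it with qxl_release_unmap() without sleeping in
 * between.
 */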
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo *bo = release->release_bo;

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_MASK);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo *bo = release->release_bo;
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}

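/*
 * Initialize the release's fence and attach it as a shared fence to
 * every reserved bo on the list, then drop all the reservations.
 * Expects the list to have been reserved via qxl_release_reserve_list().
 */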
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;
	struct qxl_bo *qbo;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/* if the only object on the release is the release itself,
	   nothing was reserved since these objects are pinned */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		       release->id | 0xf0000000, release->base.seqno);
	trace_dma_fence_emit(&release->base);

	driver = bdev->driver;
	glob = bdev->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;
		qbo = to_qxl_bo(bo);

		reservation_object_add_shared_fence(bo->resv, &release->base);
		ttm_bo_add_to_lru(bo);
		reservation_object_unlock(bo->resv);
	}
	spin_unlock(&glob->lru_lock);
	ww_acquire_fini(&release->ticket);
}