/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>
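
/*
 * Walk the validation list backwards from @entry (exclusive) and unlock
 * the reservation of every buffer reserved so far; the list itself is
 * left untouched.
 */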
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}
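
/*
 * ttm_eu_backoff_reservation - undo a successful buffer list reservation
 *
 * Bumps every buffer on @list to the tail of its LRU list before dropping
 * its reservation, and ends the ww_acquire context when one was used.
 */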
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	spin_lock(&ttm_bo_glob.lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&ttm_bo_glob.lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is already reserved by another validator, the validator
 * holding the younger ww_acquire ticket backs off (-EDEADLK) and
 * re-takes that buffer on the slow path. This prevents deadlocks when
 * several validators reserve the same buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;

			/* -EALREADY: this context already holds the buffer,
			 * so it is a duplicate in the list. Park it on @dups;
			 * step @entry back first so the iterator resumes at
			 * the correct successor after the removal.
			 */
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->num_shared)
				continue;

			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);
			if (!ret)
				continue;
		}

		/* Uh oh, we lost out. Drop every reservation taken so far,
		 * try to reserve just this buffer (via the slow path if we
		 * hit the -EDEADLK case), and start the list over if that
		 * succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK)
			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);

		if (!ret && entry->num_shared)
			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);

		if (unlikely(ret != 0)) {
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* Move this buffer to the front of the list. The loop then
		 * continues with the element after it, i.e. from the start
		 * of the original list, without having to track how far we
		 * had come before backing off.
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
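
/*
 * ttm_eu_fence_buffer_objects - attach a fence to every buffer on the list
 *
 * The fence is added as shared when the entry asked for shared slots and
 * as exclusive otherwise; each buffer is then bumped to the LRU tail and
 * unreserved.
 */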
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	spin_lock(&ttm_bo_glob.lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->num_shared)
			dma_resv_add_shared_fence(bo->base.resv, fence);
		else
			dma_resv_add_excl_fence(bo->base.resv, fence);
		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&ttm_bo_glob.lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
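
/*
 * Example: a minimal sketch of how a driver might drive the execbuf flow
 * above. Kept under #if 0 so it is never compiled; validate_bo() is a
 * hypothetical stand-in for driver validation, not part of this file's API.
 */
#if 0
static int example_submit(struct ww_acquire_ctx *ticket,
			  struct list_head *list, struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;
	int ret;

	/* Deadlock-safe reservation of every buffer on @list. */
	ret = ttm_eu_reserve_buffers(ticket, list, true, NULL);
	if (ret)
		return ret;

	list_for_each_entry(entry, list, head) {
		ret = validate_bo(entry->bo);	/* hypothetical helper */
		if (ret) {
			/* Error path: drop all reservations again. */
			ttm_eu_backoff_reservation(ticket, list);
			return ret;
		}
	}

	/* Success: publish the fence and unreserve in one go. */
	ttm_eu_fence_buffer_objects(ticket, list, fence);
	return 0;
}
#endif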