// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2018-2019 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

/**
 * struct vmw_dx_streamoutput - Streamoutput resource metadata.
 * @res: Base resource struct.
 * @ctx: Non-refcounted context to which @res belongs.
 * @cotable: Refcounted cotable holding this streamoutput.
 * @cotable_head: List head for the cotable's so_res list.
 * @id: User-space provided identifier.
 * @size: User-space provided MOB size.
 * @committed: Whether the streamoutput is actually created or pending creation.
 */
struct vmw_dx_streamoutput {
	struct vmw_resource res;
	struct vmw_resource *ctx;
	struct vmw_resource *cotable;
	struct list_head cotable_head;
	u32 id;
	u32 size;
	bool committed;
};

static int vmw_dx_streamoutput_create(struct vmw_resource *res);
static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf);
static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
				      struct ttm_validate_buffer *val_buf);
static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
					      enum vmw_cmdbuf_res_state state);

static size_t vmw_streamoutput_size;

static const struct vmw_res_func vmw_dx_streamoutput_func = {
	.res_type = vmw_res_streamoutput,
	.needs_backup = true,
	.may_evict = false,
	.type_name = "DX streamoutput",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_streamoutput_create,
	.destroy = NULL, /* Command buffer managed resource. */
	.bind = vmw_dx_streamoutput_bind,
	.unbind = vmw_dx_streamoutput_unbind,
	.commit_notify = vmw_dx_streamoutput_commit_notify,
};

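/**
 * vmw_res_to_dx_streamoutput - Convert a base resource to a streamoutput.
 * @res: Pointer to the embedded struct vmw_resource.
 *
 * Return: Pointer to the containing struct vmw_dx_streamoutput.
 */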
static inline struct vmw_dx_streamoutput *
vmw_res_to_dx_streamoutput(struct vmw_resource *res)
{
	return container_of(res, struct vmw_dx_streamoutput, res);
}

/**
 * vmw_dx_streamoutput_unscrub - Reattach the MOB to the streamoutput.
 * @res: The streamoutput resource.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
{
	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindStreamOutput body;
	} *cmd;

	if (!list_empty(&so->cotable_head) || !so->committed)
		return 0;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.soid = so->id;
	cmd->body.mobid = res->backup->base.resource->start;
	cmd->body.offsetInBytes = res->backup_offset;
	cmd->body.sizeInBytes = so->size;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	vmw_cotable_add_resource(so->cotable, &so->cotable_head);

	return 0;
}

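/**
 * vmw_dx_streamoutput_create - The res_func create callback.
 * @res: The streamoutput resource.
 *
 * The device object itself is created by user-space command buffers;
 * here the MOB is reattached if one was previously bound and the device
 * id is restored.
 *
 * Return: 0 on success, negative error code on failure.
 */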
static int vmw_dx_streamoutput_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
	int ret = 0;

	WARN_ON_ONCE(!so->committed);

	if (vmw_resource_mob_attached(res)) {
		mutex_lock(&dev_priv->binding_mutex);
		ret = vmw_dx_streamoutput_unscrub(res);
		mutex_unlock(&dev_priv->binding_mutex);
	}

	res->id = so->id;

	return ret;
}

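/**
 * vmw_dx_streamoutput_bind - The res_func bind callback.
 * @res: The streamoutput resource.
 * @val_buf: The validated backup buffer, which must be placed in a MOB.
 *
 * Return: 0 on success, negative error code on failure.
 */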
static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	int ret;

	if (WARN_ON(bo->resource->mem_type != VMW_PL_MOB))
		return -EINVAL;

	mutex_lock(&dev_priv->binding_mutex);
	ret = vmw_dx_streamoutput_unscrub(res);
	mutex_unlock(&dev_priv->binding_mutex);

	return ret;
}

/**
 * vmw_dx_streamoutput_scrub - Unbind the MOB from the streamoutput.
 * @res: The streamoutput resource.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int vmw_dx_streamoutput_scrub(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindStreamOutput body;
	} *cmd;

	if (list_empty(&so->cotable_head))
		return 0;

	WARN_ON_ONCE(!so->committed);

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.soid = res->id;
	cmd->body.mobid = SVGA3D_INVALID_ID;
	cmd->body.offsetInBytes = 0;
	cmd->body.sizeInBytes = so->size;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	res->id = -1;
	list_del_init(&so->cotable_head);

	return 0;
}

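/**
 * vmw_dx_streamoutput_unbind - The res_func unbind callback.
 * @res: The streamoutput resource.
 * @readback: Unused by this resource type.
 * @val_buf: The backup buffer, which is fenced after scrubbing.
 *
 * Return: 0 on success, negative error code on failure.
 */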
static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
				      struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_fence_obj *fence;
	int ret;

	if (WARN_ON(res->backup->base.resource->mem_type != VMW_PL_MOB))
		return -EINVAL;

	mutex_lock(&dev_priv->binding_mutex);
	ret = vmw_dx_streamoutput_scrub(res);
	mutex_unlock(&dev_priv->binding_mutex);

	if (ret)
		return ret;

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(val_buf->bo, fence);

	if (fence != NULL)
		vmw_fence_obj_unreference(&fence);

	return 0;
}

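/**
 * vmw_dx_streamoutput_commit_notify - The commit_notify callback.
 * @res: The streamoutput resource.
 * @state: Whether the staged command buffer resource is added or removed.
 *
 * On add, the resource is put on the cotable list and marked committed;
 * on removal it is taken off the list and marked uncommitted.
 */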
static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
					      enum vmw_cmdbuf_res_state state)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);

	if (state == VMW_CMDBUF_RES_ADD) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_cotable_add_resource(so->cotable, &so->cotable_head);
		so->committed = true;
		res->id = so->id;
		mutex_unlock(&dev_priv->binding_mutex);
	} else {
		mutex_lock(&dev_priv->binding_mutex);
		list_del_init(&so->cotable_head);
		so->committed = false;
		res->id = -1;
		mutex_unlock(&dev_priv->binding_mutex);
	}
}

/**
 * vmw_dx_streamoutput_lookup - Do a streamoutput resource lookup by user key.
 * @man: Command buffer managed resource manager for the current context.
 * @user_key: User-space identifier for lookup.
 *
 * Return: Valid refcounted vmw_resource on success, error pointer on failure.
 */
struct vmw_resource *
vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
			   u32 user_key)
{
	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_streamoutput,
				     user_key);
}

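/**
 * vmw_dx_streamoutput_res_free - The streamoutput resource destructor.
 * @res: The streamoutput resource.
 *
 * Drops the cotable reference, frees the structure and releases the
 * accounted graphics memory.
 */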
static void vmw_dx_streamoutput_res_free(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);

	vmw_resource_unreference(&so->cotable);
	kfree(so);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_streamoutput_size);
}

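/**
 * vmw_dx_streamoutput_hw_destroy - The hw_destroy callback.
 * @res: The streamoutput resource.
 *
 * Only invalidates the device id; the device object is destroyed by
 * user-space command buffers or as part of context takedown.
 */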
static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res)
{
	/* Destroyed by user-space cmd buf or as part of context takedown. */
	res->id = -1;
}

/**
 * vmw_dx_streamoutput_add - Add a streamoutput as a cmd buf managed resource.
 * @man: Command buffer managed resource manager for the current context.
 * @ctx: Pointer to the context resource.
 * @user_key: The identifier for this streamoutput.
 * @list: The list of staged command buffer managed resources.
 *
 * Return: 0 on success, negative error code on failure.
 */
int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
			    struct vmw_resource *ctx, u32 user_key,
			    struct list_head *list)
{
	struct vmw_dx_streamoutput *so;
	struct vmw_resource *res;
	struct vmw_private *dev_priv = ctx->dev_priv;
	struct ttm_operation_ctx ttm_opt_ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	int ret;

	if (!vmw_streamoutput_size)
		vmw_streamoutput_size = ttm_round_pot(sizeof(*so));

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_streamoutput_size, &ttm_opt_ctx);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for streamout.\n");
		return ret;
	}

	so = kmalloc(sizeof(*so), GFP_KERNEL);
	if (!so) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_streamoutput_size);
		return -ENOMEM;
	}

	res = &so->res;
	so->ctx = ctx;
	so->cotable = vmw_resource_reference
		(vmw_context_cotable(ctx, SVGA_COTABLE_STREAMOUTPUT));
	so->id = user_key;
	so->committed = false;
	INIT_LIST_HEAD(&so->cotable_head);
	ret = vmw_resource_init(dev_priv, res, true,
				vmw_dx_streamoutput_res_free,
				&vmw_dx_streamoutput_func);
	if (ret)
		goto out_resource_init;

	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_streamoutput, user_key,
				 res, list);
	if (ret)
		goto out_resource_init;

	res->id = so->id;
	res->hw_destroy = vmw_dx_streamoutput_hw_destroy;

out_resource_init:
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_dx_streamoutput_set_size - Set the streamoutput MOB size in the res struct.
 * @res: The streamoutput resource for which to set the size.
 * @size: The size provided by user-space.
 */
void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size)
{
	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);

	so->size = size;
}

/**
 * vmw_dx_streamoutput_remove - Stage a streamoutput for removal.
 * @man: Command buffer managed resource manager for the current context.
 * @user_key: The identifier for this streamoutput.
 * @list: The list of staged command buffer managed resources.
 *
 * Return: 0 on success, negative error code on failure.
 */
int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
			       u32 user_key,
			       struct list_head *list)
{
	struct vmw_resource *r;

	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_streamoutput,
				     (u32)user_key, list, &r);
}

/**
 * vmw_dx_streamoutput_cotable_list_scrub - cotable unbind_func callback.
 * @dev_priv: Device private.
 * @list: The list of cotable resources.
 * @readback: Whether the call was part of a readback unbind.
 */
void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
					    struct list_head *list,
					    bool readback)
{
	struct vmw_dx_streamoutput *entry, *next;

	lockdep_assert_held_once(&dev_priv->binding_mutex);

	list_for_each_entry_safe(entry, next, list, cotable_head) {
		WARN_ON(vmw_dx_streamoutput_scrub(&entry->res));
		if (!readback)
			entry->committed = false;
	}
}