// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Treat context OTables as resources to make use of the resource
 * backing MOB eviction mechanism, which is used to read back the COTable
 * whenever the backing MOB is evicted.
 */

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"

/**
 * struct vmw_cotable - Context Object Table resource
 *
 * @res: struct vmw_resource we are deriving from.
 * @ctx: non-refcounted pointer to the owning context.
 * @size_read_back: Size of data read back during eviction.
 * @seen_entries: Seen entries in command stream for this cotable.
 * @type: The cotable type.
 * @scrubbed: Whether the cotable has been scrubbed.
 * @resource_list: List of resources in the cotable.
 */
struct vmw_cotable {
	struct vmw_resource res;
	struct vmw_resource *ctx;
	size_t size_read_back;
	int seen_entries;
	u32 type;
	bool scrubbed;
	struct list_head resource_list;
};

/**
 * struct vmw_cotable_info - Static info about cotable types
 *
 * @min_initial_entries: Min number of initial entries at cotable allocation
 * for this cotable type.
 * @size: Size of each entry.
 * @unbind_func: Function to call to unbind the contents of a cotable of
 * this type before scrubbing, or NULL if no unbinding is needed.
 */
struct vmw_cotable_info {
	u32 min_initial_entries;
	u32 size;
	void (*unbind_func)(struct vmw_private *, struct list_head *,
			    bool);
};
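
/*
 * The co_info table below is indexed by SVGACOTableType. Each entry gives
 * the device entry size for that table type and, where needed, the helper
 * used to unbind its contents before a scrub: views are destroyed, DX
 * shaders are scrubbed, and the remaining types need no unbind step.
 */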

static const struct vmw_cotable_info co_info[] = {
	{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
	{1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
	{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
	{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
	{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
	{1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
	{1, sizeof(SVGACOTableDXQueryEntry), NULL},
	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
};

/*
 * Cotables with bindings that we remove must be scrubbed first,
 * otherwise the device will swap in an invalid context when we remove
 * bindings before scrubbing a cotable.
 */
const SVGACOTableType vmw_cotable_scrub_order[] = {
	SVGA_COTABLE_RTVIEW,
	SVGA_COTABLE_DSVIEW,
	SVGA_COTABLE_SRVIEW,
	SVGA_COTABLE_DXSHADER,
	SVGA_COTABLE_ELEMENTLAYOUT,
	SVGA_COTABLE_BLENDSTATE,
	SVGA_COTABLE_DEPTHSTENCIL,
	SVGA_COTABLE_RASTERIZERSTATE,
	SVGA_COTABLE_SAMPLER,
	SVGA_COTABLE_STREAMOUTPUT,
	SVGA_COTABLE_DXQUERY,
};
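
/*
 * Sketch of intended use: the context code walks this array in order
 * (view tables and the shader table first) when scrubbing all cotables
 * of a context, so that bindings are removed before their tables are.
 */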

static int vmw_cotable_bind(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf);
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf);
static int vmw_cotable_create(struct vmw_resource *res);
static int vmw_cotable_destroy(struct vmw_resource *res);

static const struct vmw_res_func vmw_cotable_func = {
	.res_type = vmw_res_cotable,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "context guest backed object tables",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_cotable_create,
	.destroy = vmw_cotable_destroy,
	.bind = vmw_cotable_bind,
	.unbind = vmw_cotable_unbind,
};

/**
 * vmw_cotable - Convert a struct vmw_resource pointer to a struct
 * vmw_cotable pointer
 *
 * @res: Pointer to the resource.
 */
static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
{
	return container_of(res, struct vmw_cotable, res);
}

/**
 * vmw_cotable_destroy - Cotable resource destroy callback
 *
 * @res: Pointer to the cotable resource.
 *
 * There is no device cotable destroy command, so this function only
 * makes sure that the resource id is set to invalid.
 */
static int vmw_cotable_destroy(struct vmw_resource *res)
{
	res->id = -1;
	return 0;
}

/**
 * vmw_cotable_unscrub - Undo a cotable scrub operation
 *
 * @res: Pointer to the cotable resource
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 * This is identical to bind() except the function interface looks different.
 */
static int vmw_cotable_unscrub(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = &res->backup->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd;

	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
	dma_resv_assert_held(bo->base.resv);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;

	WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
	WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = vcotbl->ctx->id;
	cmd->body.type = vcotbl->type;
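	/* For VMW_PL_MOB placements, mem.start holds the device MOB id. */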
	cmd->body.mobid = bo->mem.start;
	cmd->body.validSizeInBytes = vcotbl->size_read_back;

	vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
	vcotbl->scrubbed = false;

	return 0;
}

/**
 * vmw_cotable_bind - Cotable resource bind callback
 *
 * @res: Pointer to the cotable resource
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 */
static int vmw_cotable_bind(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf)
{
	/*
	 * The create() callback may have changed @res->backup without
	 * the caller noticing, and with val_buf->bo still pointing to
	 * the old backup buffer. Although hackish, and not used currently,
	 * take the opportunity to correct the value here so that it's not
	 * misused in the future.
	 */
	val_buf->bo = &res->backup->base;

	return vmw_cotable_unscrub(res);
}

/**
 * vmw_cotable_scrub - Scrub the cotable from the device.
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to initiate a readback of the cotable data to the
 * backup buffer.
 *
 * In some situations (context swapouts) it might be desirable to make the
 * device forget about the cotable without performing a full unbind. A full
 * unbind requires reserved backup buffers and it might not be possible to
 * reserve them due to locking order violation issues. The vmw_cotable_scrub
 * function implements a partial unbind() without that requirement but with the
 * following restrictions.
 * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
 *    be called.
 * 2) Before the cotable backing buffer is used by the CPU, or during the
 *    resource destruction, vmw_cotable_unbind() must be called.
 */
int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	size_t submit_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd0;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd1;

	if (vcotbl->scrubbed)
		return 0;

	if (co_info[vcotbl->type].unbind_func)
		co_info[vcotbl->type].unbind_func(dev_priv,
						  &vcotbl->resource_list,
						  readback);
	submit_size = sizeof(*cmd1);
	if (readback)
		submit_size += sizeof(*cmd0);

	cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (!cmd1)
		return -ENOMEM;

	vcotbl->size_read_back = 0;
	if (readback) {
		cmd0 = (void *) cmd1;
		cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd0->header.size = sizeof(cmd0->body);
		cmd0->body.cid = vcotbl->ctx->id;
		cmd0->body.type = vcotbl->type;
		cmd1 = (void *) &cmd0[1];
		vcotbl->size_read_back = res->backup_size;
	}
	cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.cid = vcotbl->ctx->id;
	cmd1->body.type = vcotbl->type;
	cmd1->body.mobid = SVGA3D_INVALID_ID;
	cmd1->body.validSizeInBytes = 0;
	vmw_fifo_commit_flush(dev_priv, submit_size);
	vcotbl->scrubbed = true;

	/* Trigger a create() on next validate. */
	res->id = -1;

	return 0;
}
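
/*
 * Sketch of the scrub/unscrub pairing: on a context swapout the context
 * code calls vmw_cotable_scrub() for each cotable of the context, in
 * vmw_cotable_scrub_order. The tables are unscrubbed again from the
 * vmw_cotable_create() / vmw_cotable_bind() path on the next validation.
 */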

/**
 * vmw_cotable_unbind - Cotable resource unbind callback
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to read back cotable data to the backup buffer.
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * Unbinds the cotable from the device and fences the backup buffer.
 */
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	if (!vmw_resource_mob_attached(res))
		return 0;

	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
	dma_resv_assert_held(bo->base.resv);

	mutex_lock(&dev_priv->binding_mutex);
	if (!vcotbl->scrubbed)
		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
	mutex_unlock(&dev_priv->binding_mutex);
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(bo, fence);
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
329
330 /**
331 * vmw_cotable_readback - Read back a cotable without unbinding.
332 *
333 * @res: The cotable resource.
334 *
335 * Reads back a cotable to its backing mob without scrubbing the MOB from
336 * the cotable. The MOB is fenced for subsequent CPU access.
337 */
static int vmw_cotable_readback(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd;
	struct vmw_fence_obj *fence;

	if (!vcotbl->scrubbed) {
		cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
		if (!cmd)
			return -ENOMEM;

		cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.cid = vcotbl->ctx->id;
		cmd->body.type = vcotbl->type;
		vcotbl->size_read_back = res->backup_size;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(&res->backup->base, fence);
	vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_cotable_resize - Resize a cotable.
 *
 * @res: The cotable resource.
 * @new_size: The new size.
 *
 * Resizes a cotable and binds the new backup buffer.
 * On failure the cotable is left intact.
 * Important! This function may not fail once the MOB switch has been
 * committed to hardware. That would put the device context in an
 * invalid state which we can't currently recover from.
 */
static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_buffer_object *buf, *old_buf = res->backup;
	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
	size_t old_size = res->backup_size;
	size_t old_size_read_back = vcotbl->size_read_back;
	size_t cur_size_read_back;
	struct ttm_bo_kmap_obj old_map, new_map;
	int ret;
	size_t i;

	ret = vmw_cotable_readback(res);
	if (ret)
		return ret;

	cur_size_read_back = vcotbl->size_read_back;
	vcotbl->size_read_back = old_size_read_back;
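	/*
	 * The size just read back is saved in cur_size_read_back and
	 * applied to the new buffer before the device switch further down,
	 * while the previous value is restored above so the resource state
	 * stays consistent if the resize fails early.
	 */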

	/*
	 * While the device is processing, allocate and reserve a buffer
	 * object for the new COTable. Initially pin the buffer object to
	 * make sure we can use tryreserve without failure.
	 */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
			  true, vmw_bo_bo_free);
	if (ret) {
		DRM_ERROR("Failed initializing new cotable MOB.\n");
		return ret;
	}

	bo = &buf->base;
	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));

	ret = ttm_bo_wait(old_bo, false, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed waiting for cotable unbind.\n");
		goto out_wait;
	}

	/*
	 * Do a page by page copy of COTables. This eliminates slow vmap()s.
	 * This should really be a TTM utility.
	 */
	for (i = 0; i < old_bo->num_pages; ++i) {
		bool dummy;

		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping old COTable on resize.\n");
			goto out_wait;
		}
		ret = ttm_bo_kmap(bo, i, 1, &new_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping new COTable on resize.\n");
			goto out_map_new;
		}
		memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
		       ttm_kmap_obj_virtual(&old_map, &dummy),
		       PAGE_SIZE);
		ttm_bo_kunmap(&new_map);
		ttm_bo_kunmap(&old_map);
	}

	/* Unpin new buffer, and switch backup buffers. */
	ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed validating new COTable backup buffer.\n");
		goto out_wait;
	}

	vmw_resource_mob_detach(res);
	res->backup = buf;
	res->backup_size = new_size;
	vcotbl->size_read_back = cur_size_read_back;

	/*
	 * Now tell the device to switch. If this fails, then we need to
	 * revert the full resize.
	 */
	ret = vmw_cotable_unscrub(res);
	if (ret) {
		DRM_ERROR("Failed switching COTable backup buffer.\n");
		res->backup = old_buf;
		res->backup_size = old_size;
		vcotbl->size_read_back = old_size_read_back;
		vmw_resource_mob_attach(res);
		goto out_wait;
	}

	vmw_resource_mob_attach(res);
	/* Let go of the old mob. */
	vmw_bo_unreference(&old_buf);
	res->id = vcotbl->type;

	return 0;

out_map_new:
	ttm_bo_kunmap(&old_map);
out_wait:
	ttm_bo_unreserve(bo);
	vmw_bo_unreference(&buf);

	return ret;
}

/**
 * vmw_cotable_create - Cotable resource create callback
 *
 * @res: Pointer to a cotable resource.
 *
 * There is no separate create command for cotables, so this callback, which
 * is called before bind() in the validation sequence, is instead used for
 * two things:
 * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
 *    buffer.
 * 2) Resize the cotable if needed.
 */
static int vmw_cotable_create(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	size_t new_size = res->backup_size;
	size_t needed_size;
	int ret;

	/* Check whether we need to resize the cotable */
	needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
	while (needed_size > new_size)
		new_size *= 2;
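
	/*
	 * Example: with a one-page (4096-byte) backup and 64-byte entries,
	 * a highest seen id of 100 gives needed_size = 101 * 64 = 6464, so
	 * new_size doubles once to 8192 before the resize below.
	 */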
	if (likely(new_size <= res->backup_size)) {
		if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
			ret = vmw_cotable_unscrub(res);
			if (ret)
				return ret;
		}
		res->id = vcotbl->type;
		return 0;
	}

	return vmw_cotable_resize(res, new_size);
}

/**
 * vmw_hw_cotable_destroy - Cotable hw_destroy callback
 *
 * @res: Pointer to a cotable resource.
 *
 * The final (part of resource destruction) destroy callback.
 */
static void vmw_hw_cotable_destroy(struct vmw_resource *res)
{
	(void) vmw_cotable_destroy(res);
}

static size_t cotable_acc_size;

/**
 * vmw_cotable_free - Cotable resource destructor
 *
 * @res: Pointer to a cotable resource.
 */
static void vmw_cotable_free(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;

	kfree(res);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
}

/**
 * vmw_cotable_alloc - Create a cotable resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @ctx: Pointer to the context resource.
 * The cotable resource will not add a refcount.
 * @type: The cotable type.
 */
struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
				       struct vmw_resource *ctx,
				       u32 type)
{
	struct vmw_cotable *vcotbl;
	struct ttm_operation_ctx ttm_opt_ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	int ret;
	u32 num_entries;

	if (unlikely(cotable_acc_size == 0))
		cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   cotable_acc_size, &ttm_opt_ctx);
	if (unlikely(ret))
		return ERR_PTR(ret);

	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
	if (unlikely(!vcotbl)) {
		ret = -ENOMEM;
		goto out_no_alloc;
	}

	ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
				vmw_cotable_free, &vmw_cotable_func);
	if (unlikely(ret != 0))
		goto out_no_init;

	INIT_LIST_HEAD(&vcotbl->resource_list);
	vcotbl->res.id = type;
	vcotbl->res.backup_size = PAGE_SIZE;
	num_entries = PAGE_SIZE / co_info[type].size;
	if (num_entries < co_info[type].min_initial_entries) {
		vcotbl->res.backup_size = co_info[type].min_initial_entries *
			co_info[type].size;
		vcotbl->res.backup_size =
			(vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	}
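	/*
	 * Note: every entry in co_info currently has min_initial_entries
	 * == 1, so the page round-up above only matters if a single entry
	 * is larger than a page.
	 */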

	vcotbl->scrubbed = true;
	vcotbl->seen_entries = -1;
	vcotbl->type = type;
	vcotbl->ctx = ctx;

	vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;

	return &vcotbl->res;

out_no_init:
	kfree(vcotbl);
out_no_alloc:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
	return ERR_PTR(ret);
}

/**
 * vmw_cotable_notify - Notify the cotable about an item creation
 *
 * @res: Pointer to a cotable resource.
 * @id: Item id.
 */
int vmw_cotable_notify(struct vmw_resource *res, int id)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);

	if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
		DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
			  (unsigned) vcotbl->type, id);
		return -EINVAL;
	}

	if (vcotbl->seen_entries < id) {
		/* Trigger a call to create() on next validate */
		res->id = -1;
		vcotbl->seen_entries = id;
	}

	return 0;
}
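
/*
 * For example (sketch): creating an item with id 5 in a fresh cotable
 * raises seen_entries from -1 to 5 and invalidates res->id, so the next
 * validation calls create(), which checks that (5 + 1) entries still fit
 * in the backup buffer and resizes it otherwise.
 */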

/**
 * vmw_cotable_add_resource - add a resource to the cotable's list of
 * active resources.
 *
 * @res: pointer to the struct vmw_resource representing the cotable.
 * @head: pointer to the struct list_head member of the resource, dedicated
 * to the cotable active resource list.
 */
void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
{
	struct vmw_cotable *vcotbl =
		container_of(res, struct vmw_cotable, res);

	list_add_tail(head, &vcotbl->resource_list);
}